# Copyright Broadcom, Inc. All Rights Reserved.
|
|
# SPDX-License-Identifier: Apache-2.0
|
|
|
|
## @section Global parameters
|
|
## Global Docker image parameters
|
|
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
|
|
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
|
|
##
|
|
|
|
## @param global.imageRegistry Global Docker image registry
|
|
## @param global.imagePullSecrets Global Docker registry secret names as an array
|
|
## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
|
|
## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead
|
|
##
|
|
global:
|
|
imageRegistry: ""
|
|
## E.g.
|
|
## imagePullSecrets:
|
|
## - myRegistryKeySecretName
|
|
##
|
|
imagePullSecrets: []
|
|
defaultStorageClass: {{ .Values.globals.harbor.storageClass }}
|
|
storageClass: ""
|
|
## Security parameters
|
|
##
|
|
security:
|
|
## @param global.security.allowInsecureImages Allows skipping image verification
|
|
allowInsecureImages: false
|
|
## Compatibility adaptations for Kubernetes platforms
|
|
##
|
|
compatibility:
|
|
## Compatibility adaptations for Openshift
|
|
##
|
|
openshift:
|
|
## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
|
|
##
|
|
adaptSecurityContext: auto
|
|
## @section Common Parameters
|
|
##
|
|
|
|
## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
|
|
##
|
|
nameOverride: ""
|
|
## @param fullnameOverride String to fully override common.names.fullname template with a string
|
|
##
|
|
fullnameOverride: ""
|
|
## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
|
|
##
|
|
kubeVersion: ""
|
|
## @param clusterDomain Kubernetes Cluster Domain
|
|
##
|
|
clusterDomain: cluster.local
|
|
## @param commonAnnotations Annotations to add to all deployed objects
|
|
##
|
|
commonAnnotations: {}
|
|
## @param commonLabels Labels to add to all deployed objects
|
|
##
|
|
commonLabels: {}
|
|
## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template).
|
|
##
|
|
extraDeploy: []
|
|
## Enable diagnostic mode in the deployment(s)/statefulset(s)
|
|
##
|
|
diagnosticMode:
|
|
## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
|
|
##
|
|
enabled: false
|
|
## @param diagnosticMode.command Command to override all containers in the deployment(s)/statefulset(s)
|
|
##
|
|
command:
|
|
- sleep
|
|
## @param diagnosticMode.args Args to override all containers in the deployment(s)/statefulset(s)
|
|
##
|
|
args:
|
|
- infinity
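## With diagnostic mode enabled, probes are disabled and the containers simply sleep, so you can exec into them
## for troubleshooting. A minimal sketch (release name "my-harbor" and the core deployment name are placeholders):
## helm upgrade my-harbor bitnami/harbor --reuse-values --set diagnosticMode.enabled=true
## kubectl exec -it deploy/my-harbor-core -- bash
##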
|
|
## @section Harbor common parameters
|
|
##
|
|
|
|
## @param adminPassword The initial password of Harbor admin. Change it from portal after launching Harbor
|
|
##
|
|
adminPassword: {{ .Values.globals.harbor.password }}
|
|
## @param externalURL The external URL for Harbor Core service
|
|
## It is used to
|
|
## 1) populate the docker/helm commands shown on portal
|
|
##
|
|
## Format: protocol://domain[:port]. Usually:
|
|
## 1) if "exposureType" is "ingress", the "domain" should be
|
|
## the value of "ingress.hostname"
|
|
## 2) if "exposureType" is "proxy" and "service.type" is "ClusterIP",
|
|
## the "domain" should be the value of "service.clusterIP"
|
|
## 3) if "exposureType" is "proxy" and "service.type" is "NodePort",
|
|
## the "domain" should be the IP address of k8s node
|
|
## 4) if "exposureType" is "proxy" and "service.type" is "LoadBalancer",
|
|
## the "domain" should be the LoadBalancer IP
|
|
##
|
|
externalURL: https://{{ .Values.globals.harbor.hostname }}
|
|
## Note: If Harbor is exposed via Ingress, the NGINX server will not be used
|
|
## @param proxy.httpProxy The URL of the HTTP proxy server
|
|
## @param proxy.httpsProxy The URL of the HTTPS proxy server
|
|
## @param proxy.noProxy The URLs that the proxy settings do not apply to
|
|
## @param proxy.components The component list that the proxy settings apply to
|
|
##
|
|
proxy:
|
|
httpProxy: ""
|
|
httpsProxy: ""
|
|
noProxy: 127.0.0.1,localhost,.local,.internal
|
|
components:
|
|
- core
|
|
- jobservice
|
|
- trivy
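## For reference, a proxied setup typically looks like the commented sketch below; the proxy URLs are placeholders,
## not defaults:
## proxy:
##   httpProxy: "http://proxy.example.internal:3128"
##   httpsProxy: "http://proxy.example.internal:3128"
##   noProxy: 127.0.0.1,localhost,.local,.internal
##   components:
##     - core
##     - jobservice
##     - trivy
##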
|
|
## @param logLevel The log level used for Harbor services. Allowed values are [ fatal \| error \| warn \| info \| debug \| trace ]
|
|
##
|
|
logLevel: debug
|
|
## TLS settings
|
|
## Note: TLS cert files need to be provided for each component in advance.
|
|
##
|
|
internalTLS:
|
|
## @param internalTLS.enabled Use TLS in all the supported containers: core, jobservice, portal, registry and trivy
|
|
##
|
|
enabled: false
|
|
## @param internalTLS.caBundleSecret Name of an existing secret with a custom CA that will be injected into the trust store for core, jobservice, registry, trivy components
|
|
## The secret must contain the key "ca.crt"
|
|
##
|
|
caBundleSecret: ""
|
|
## IP family parameters
|
|
##
|
|
ipFamily:
|
|
## @param ipFamily.ipv6.enabled Enable listening on IPv6 ([::]) for NGINX-based components (NGINX,portal)
|
|
## Note: enabling IPv6 will cause NGINX to crash on start on systems with IPv6 disabled (`ipv6.disable` kernel flag)
|
|
##
|
|
ipv6:
|
|
enabled: false
|
|
## @param ipFamily.ipv4.enabled Enable listening on IPv4 for NGINX-based components (NGINX,portal)
|
|
##
|
|
ipv4:
|
|
enabled: true
|
|
## @section Traffic Exposure Parameters
|
|
##
|
|
|
|
## @param exposureType The way to expose Harbor. Allowed values are [ ingress \| proxy ]
|
|
## Use "proxy" to use a deploy NGINX proxy in front of Harbor services
|
|
## Use "ingress" to use an Ingress Controller as proxy
|
|
##
|
|
exposureType: ingress
|
|
## Service parameters
|
|
##
|
|
service:
|
|
## @param service.type NGINX proxy service type
|
|
##
|
|
type: ClusterIP
|
|
## @param service.ports.http NGINX proxy service HTTP port
|
|
## @param service.ports.https NGINX proxy service HTTPS port
|
|
##
|
|
ports:
|
|
http: 80
|
|
https: 443
|
|
## Node ports to expose
|
|
## @param service.nodePorts.http Node port for HTTP
|
|
## @param service.nodePorts.https Node port for HTTPS
|
|
## NOTE: choose port between <30000-32767>
|
|
##
|
|
nodePorts:
|
|
http: ""
|
|
https: ""
|
|
## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
|
|
## Values: ClientIP or None
|
|
## ref: https://kubernetes.io/docs/concepts/services-networking/service/
|
|
##
|
|
sessionAffinity: None
|
|
## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
|
|
## sessionAffinityConfig:
|
|
## clientIP:
|
|
## timeoutSeconds: 300
|
|
##
|
|
sessionAffinityConfig: {}
|
|
## @param service.clusterIP NGINX proxy service Cluster IP
|
|
## e.g.:
|
|
## clusterIP: None
|
|
##
|
|
clusterIP: ""
|
|
## @param service.loadBalancerIP NGINX proxy service Load Balancer IP
|
|
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
|
|
##
|
|
loadBalancerIP: ""
|
|
## @param service.loadBalancerSourceRanges NGINX proxy service Load Balancer sources
|
|
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
|
|
## e.g:
|
|
## loadBalancerSourceRanges:
|
|
## - 10.10.10.0/24
|
|
##
|
|
loadBalancerSourceRanges: []
|
|
## @param service.externalTrafficPolicy NGINX proxy service external traffic policy
|
|
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
|
|
##
|
|
externalTrafficPolicy: Cluster
|
|
## @param service.annotations Additional custom annotations for NGINX proxy service
|
|
##
|
|
annotations: {}
|
|
## @param service.extraPorts Extra port to expose on NGINX proxy service
|
|
##
|
|
extraPorts: []
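## Illustrative extra port entry using standard Kubernetes ServicePort fields (the metrics port below is an
## assumption for the example, not a chart default):
## extraPorts:
##   - name: metrics
##     port: 9090
##     targetPort: 9090
##     protocol: TCP
##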
|
|
ingress:
|
|
## Configure the ingress resource that allows you to access Harbor Core
|
|
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
|
|
##
|
|
core:
|
|
## @param ingress.core.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
|
|
## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
|
|
## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
|
|
##
|
|
ingressClassName: {{ .Values.globals.harbor.ingressClass }}
|
|
## @param ingress.core.pathType Ingress path type
|
|
##
|
|
pathType: ImplementationSpecific
|
|
## @param ingress.core.apiVersion Force Ingress API version (automatically detected if not set)
|
|
##
|
|
apiVersion: ""
|
|
## @param ingress.core.controller The ingress controller type. Currently supports `default`, `gce` and `ncp`
|
|
## leave as `default` for most ingress controllers.
|
|
## set to `gce` if using the GCE ingress controller
|
|
## set to `ncp` if using the NCP (NSX-T Container Plugin) ingress controller
|
|
##
|
|
controller: default
|
|
## @param ingress.core.hostname Default host for the ingress record
|
|
##
|
|
hostname: {{ .Values.globals.harbor.hostname }}
|
|
## @param ingress.core.annotations [object] Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
|
|
## Use this parameter to set the required annotations for cert-manager, see
|
|
## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
|
|
## e.g:
|
|
## annotations:
|
|
## kubernetes.io/ingress.class: nginx
|
|
## cert-manager.io/cluster-issuer: cluster-issuer-name
|
|
##
|
|
annotations:
|
|
cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
|
|
kubernetes.io/ingress.class: {{ .Values.globals.harbor.ingressClass }}
|
|
## @param ingress.core.tls Enable TLS configuration for the host defined at `ingress.core.hostname` parameter
|
|
## TLS certificates will be retrieved from a TLS secret with name:
|
|
## You can:
|
|
## - Use the `ingress.core.secrets` parameter to create this TLS secret
|
|
## - Rely on cert-manager to create it by setting the corresponding annotations
|
|
## - Rely on Helm to create self-signed certificates by setting `ingress.core.selfSigned=true`
|
|
##
|
|
tls: true
|
|
## @param ingress.core.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
|
|
##
|
|
selfSigned: false
|
|
## @param ingress.core.extraHosts An array with additional hostname(s) to be covered with the ingress record
|
|
## e.g:
|
|
## extraHosts:
|
|
## - name: core.harbor.domain
|
|
## path: /
|
|
##
|
|
extraHosts: []
|
|
## @param ingress.core.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
|
|
## e.g:
|
|
## extraPaths:
|
|
## - path: /*
|
|
## backend:
|
|
## serviceName: ssl-redirect
|
|
## servicePort: use-annotation
|
|
##
|
|
extraPaths: []
|
|
## @param ingress.core.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
|
|
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
|
|
## e.g:
|
|
## extraTls:
|
|
## - hosts:
|
|
## - core.harbor.domain
|
|
## secretName: core.harbor.domain-tls
|
|
##
|
|
extraTls: []
|
|
## @param ingress.core.secrets Custom TLS certificates as secrets
|
|
## NOTE: 'key' and 'certificate' are expected in PEM format
|
|
## NOTE: 'name' should line up with a 'secretName' set further up
|
|
## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
|
|
## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
|
|
## It is also possible to create and manage the certificates outside of this helm chart
|
|
## Please see README.md for more information
|
|
## e.g:
|
|
## secrets:
|
|
## - name: core.harbor.domain-tls
|
|
## key: |-
|
|
## REDACTED
|
|
## ...
|
|
## REDACTED
|
|
## certificate: |-
|
|
## -----BEGIN CERTIFICATE-----
|
|
## ...
|
|
## -----END CERTIFICATE-----
|
|
##
|
|
secrets: []
|
|
## @param ingress.core.extraRules Additional rules to be covered with this ingress record
|
|
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
|
|
## e.g:
|
|
## extraRules:
|
|
## - host: example.local
|
|
## http:
|
|
## path: /
|
|
## backend:
|
|
## service:
|
|
## name: example-svc
|
|
## port:
|
|
## name: http
|
|
##
|
|
extraRules: []
|
|
##
|
|
## @section Persistence Parameters
|
|
##
|
|
|
|
## The persistence is enabled by default and a default StorageClass
|
|
## is needed in the k8s cluster to provision volumes dynamically.
|
|
## Specify another StorageClass in the "storageClass" or set "existingClaim"
|
|
## if you already have existing persistent volumes to use
|
|
##
|
|
## For storing images and charts, you can also use "azure", "gcs", "s3",
|
|
## "swift" or "oss". Set it in the "imageChartStorage" section
|
|
##
|
|
persistence:
|
|
## @param persistence.enabled Enable the data persistence or not
|
|
##
|
|
enabled: true
|
|
## Resource Policy
|
|
## @param persistence.resourcePolicy Set it to `keep` to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart is deleted
|
|
##
|
|
resourcePolicy: "keep"
|
|
persistentVolumeClaim:
|
|
## @param persistence.persistentVolumeClaim.registry.existingClaim Name of an existing PVC to use
|
|
## @param persistence.persistentVolumeClaim.registry.storageClass PVC Storage Class for Harbor Registry data volume
|
|
## Note: The default StorageClass will be used if not defined. Set it to `-` to disable dynamic provisioning
|
|
## @param persistence.persistentVolumeClaim.registry.subPath The sub path used in the volume
|
|
## @param persistence.persistentVolumeClaim.registry.accessModes The access mode of the volume
|
|
## @param persistence.persistentVolumeClaim.registry.size The size of the volume
|
|
## @param persistence.persistentVolumeClaim.registry.annotations Annotations for the PVC
|
|
## @param persistence.persistentVolumeClaim.registry.selector Selector to match an existing Persistent Volume
|
|
##
|
|
registry:
|
|
existingClaim: ""
|
|
storageClass: ""
|
|
subPath: ""
|
|
accessModes:
|
|
- ReadWriteOnce
|
|
size: 5Gi
|
|
annotations: {}
|
|
selector: {}
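## For reference, reusing a pre-provisioned PVC for the registry could look like this (the claim name is a placeholder):
## registry:
##   existingClaim: "harbor-registry-data"
##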
|
|
## @param persistence.persistentVolumeClaim.jobservice.existingClaim Name of an existing PVC to use
|
|
## @param persistence.persistentVolumeClaim.jobservice.storageClass PVC Storage Class for Harbor Jobservice data volume
|
|
## Note: The default StorageClass will be used if not defined. Set it to `-` to disable dynamic provisioning
|
|
## @param persistence.persistentVolumeClaim.jobservice.subPath The sub path used in the volume
|
|
## @param persistence.persistentVolumeClaim.jobservice.accessModes The access mode of the volume
|
|
## @param persistence.persistentVolumeClaim.jobservice.size The size of the volume
|
|
## @param persistence.persistentVolumeClaim.jobservice.annotations Annotations for the PVC
|
|
## @param persistence.persistentVolumeClaim.jobservice.selector Selector to match an existing Persistent Volume
|
|
##
|
|
jobservice:
|
|
existingClaim: ""
|
|
storageClass: ""
|
|
subPath: ""
|
|
accessModes:
|
|
- ReadWriteOnce
|
|
size: 1Gi
|
|
annotations: {}
|
|
selector: {}
|
|
## @param persistence.persistentVolumeClaim.trivy.storageClass PVC Storage Class for Trivy data volume
|
|
## Note: The default StorageClass will be used if not defined. Set it to `-` to disable dynamic provisioning
|
|
## @param persistence.persistentVolumeClaim.trivy.accessModes The access mode of the volume
|
|
## @param persistence.persistentVolumeClaim.trivy.size The size of the volume
|
|
## @param persistence.persistentVolumeClaim.trivy.annotations Annotations for the PVC
|
|
## @param persistence.persistentVolumeClaim.trivy.selector Selector to match an existing Persistent Volume
|
|
##
|
|
trivy:
|
|
storageClass: ""
|
|
accessModes:
|
|
- ReadWriteOnce
|
|
size: 5Gi
|
|
annotations: {}
|
|
selector: {}
|
|
## Define which storage backend is used for registry to store
|
|
## images and charts.
|
|
## ref: https://github.com/docker/distribution/blob/master/docs/configuration.md#storage
|
|
##
|
|
imageChartStorage:
|
|
## @param persistence.imageChartStorage.caBundleSecret Specify the `caBundleSecret` if the storage service uses a self-signed certificate. The secret must contain a key named `ca.crt` that will be injected into the trust store of the registry's containers.
|
|
##
|
|
caBundleSecret: ""
|
|
## @param persistence.imageChartStorage.disableredirect The configuration for managing redirects from content backends. For backends that do not support it (such as using MinIO® for `s3` storage type), set it to `true` to disable redirects. Refer to the [guide](https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect) for more information
|
|
##
|
|
disableredirect: false
|
|
## @param persistence.imageChartStorage.type The type of storage for images and charts: `filesystem`, `azure`, `gcs`, `s3`, `swift` or `oss`. The type must be `filesystem` if you want to use persistent volumes for the registry. Refer to the [guide](https://github.com/docker/distribution/blob/master/docs/configuration.md#storage) for more information
|
|
##
|
|
type: filesystem
|
|
## Images/charts storage parameters when type is "filesystem"
|
|
## @param persistence.imageChartStorage.filesystem.rootdirectory Filesystem storage type setting: Storage root directory
|
|
## @param persistence.imageChartStorage.filesystem.maxthreads Filesystem storage type setting: Maximum number of threads
|
|
##
|
|
filesystem:
|
|
rootdirectory: /storage
|
|
maxthreads: ""
|
|
## Images/charts storage parameters when type is "azure"
|
|
## @param persistence.imageChartStorage.azure.accountname Azure storage type setting: Name of the Azure account
|
|
## @param persistence.imageChartStorage.azure.accountkey Azure storage type setting: Key of the Azure account
|
|
## @param persistence.imageChartStorage.azure.container Azure storage type setting: Container
|
|
## @param persistence.imageChartStorage.azure.storagePrefix Azure storage type setting: Storage prefix
|
|
## @param persistence.imageChartStorage.azure.realm Azure storage type setting: Realm of the Azure account
|
|
##
|
|
azure:
|
|
accountname: accountname
|
|
accountkey: base64encodedaccountkey
|
|
container: containername
|
|
storagePrefix: /azure/harbor/charts
|
|
## Example realm
|
|
## realm: core.windows.net
|
|
##
|
|
realm: ""
|
|
## Images/charts storage parameters when type is "gcs"
|
|
## @param persistence.imageChartStorage.gcs.bucket GCS storage type setting: Bucket name
|
|
## @param persistence.imageChartStorage.gcs.encodedkey GCS storage type setting: Base64 encoded key
|
|
## @param persistence.imageChartStorage.gcs.rootdirectory GCS storage type setting: Root directory name
|
|
## @param persistence.imageChartStorage.gcs.chunksize GCS storage type setting: Chunk size
|
|
##
|
|
gcs:
|
|
bucket: bucketname
|
|
## The base64 encoded json file which contains the gcs key (file's content)
|
|
##
|
|
encodedkey: ""
|
|
rootdirectory: ""
|
|
chunksize: ""
|
|
## Images/charts storage parameters when type is "s3"
|
|
## ref: https://docs.docker.com/registry/storage-drivers/s3/
|
|
## @param persistence.imageChartStorage.s3.region S3 storage type setting: Region
|
|
## @param persistence.imageChartStorage.s3.bucket S3 storage type setting: Bucket name
|
|
## @param persistence.imageChartStorage.s3.accesskey S3 storage type setting: Access key name
|
|
## @param persistence.imageChartStorage.s3.secretkey S3 storage type setting: Secret Key name
|
|
## @param persistence.imageChartStorage.s3.regionendpoint S3 storage type setting: Region Endpoint
|
|
## @param persistence.imageChartStorage.s3.encrypt S3 storage type setting: Encrypt
|
|
## @param persistence.imageChartStorage.s3.keyid S3 storage type setting: Key ID
|
|
## @param persistence.imageChartStorage.s3.secure S3 storage type setting: Secure
|
|
## @param persistence.imageChartStorage.s3.skipverify S3 storage type setting: TLS skip verification
|
|
## @param persistence.imageChartStorage.s3.v4auth S3 storage type setting: V4 authorization
|
|
## @param persistence.imageChartStorage.s3.chunksize S3 storage type setting: Chunk size
|
|
## @param persistence.imageChartStorage.s3.rootdirectory S3 storage type setting: Root directory name
|
|
## @param persistence.imageChartStorage.s3.storageClass S3 storage type setting: Storage class
|
|
## @param persistence.imageChartStorage.s3.sse S3 storage type setting: SSE name
|
|
## @param persistence.imageChartStorage.s3.multipartcopythresholdsize S3 storage type setting: Threshold size for multipart copy
|
|
##
|
|
s3:
|
|
region: us-west-1
|
|
bucket: bucketname
|
|
accesskey: ""
|
|
secretkey: ""
|
|
regionendpoint: ""
|
|
encrypt: ""
|
|
keyid: ""
|
|
secure: ""
|
|
skipverify: ""
|
|
v4auth: ""
|
|
chunksize: ""
|
|
rootdirectory: ""
|
|
storageClass: ""
|
|
sse: ""
|
|
multipartcopythresholdsize: ""
|
|
## Images/charts storage parameters when type is "swift"
|
|
## @param persistence.imageChartStorage.swift.authurl Swift storage type setting: Authentication url
|
|
## @param persistence.imageChartStorage.swift.username Swift storage type setting: Username
|
|
## @param persistence.imageChartStorage.swift.password Swift storage type setting: Password
|
|
## @param persistence.imageChartStorage.swift.container Swift storage type setting: Container
|
|
## @param persistence.imageChartStorage.swift.region Swift storage type setting: Region
|
|
## @param persistence.imageChartStorage.swift.tenant Swift storage type setting: Tenant
|
|
## @param persistence.imageChartStorage.swift.tenantid Swift storage type setting: TenantID
|
|
## @param persistence.imageChartStorage.swift.domain Swift storage type setting: Domain
|
|
## @param persistence.imageChartStorage.swift.domainid Swift storage type setting: DomainID
|
|
## @param persistence.imageChartStorage.swift.trustid Swift storage type setting: TrustID
|
|
## @param persistence.imageChartStorage.swift.insecureskipverify Swift storage type setting: Skip TLS verification
|
|
## @param persistence.imageChartStorage.swift.chunksize Swift storage type setting: Chunk size
|
|
## @param persistence.imageChartStorage.swift.prefix Swift storage type setting: Prefix
|
|
## @param persistence.imageChartStorage.swift.secretkey Swift storage type setting: Secret Key
|
|
## @param persistence.imageChartStorage.swift.accesskey Swift storage type setting: Access Key
|
|
## @param persistence.imageChartStorage.swift.authversion Swift storage type setting: Auth version
|
|
## @param persistence.imageChartStorage.swift.endpointtype Swift storage type setting: Endpoint type
|
|
## @param persistence.imageChartStorage.swift.tempurlcontainerkey Swift storage type setting: Temp URL container key
|
|
## @param persistence.imageChartStorage.swift.tempurlmethods Swift storage type setting: Temp URL methods
|
|
##
|
|
swift:
|
|
authurl: https://storage.myprovider.com/v3/auth
|
|
username: ""
|
|
password: ""
|
|
container: ""
|
|
region: ""
|
|
tenant: ""
|
|
tenantid: ""
|
|
domain: ""
|
|
domainid: ""
|
|
trustid: ""
|
|
insecureskipverify: ""
|
|
chunksize: ""
|
|
prefix: ""
|
|
secretkey: ""
|
|
accesskey: ""
|
|
authversion: ""
|
|
endpointtype: ""
|
|
tempurlcontainerkey: ""
|
|
tempurlmethods: ""
|
|
## Images/charts storage parameters when type is "oss"
|
|
## @param persistence.imageChartStorage.oss.accesskeyid OSS storage type setting: Access key ID
|
|
## @param persistence.imageChartStorage.oss.accesskeysecret OSS storage type setting: Access key secret name containing the token
|
|
## @param persistence.imageChartStorage.oss.region OSS storage type setting: Region name
|
|
## @param persistence.imageChartStorage.oss.bucket OSS storage type setting: Bucket name
|
|
## @param persistence.imageChartStorage.oss.endpoint OSS storage type setting: Endpoint
|
|
## @param persistence.imageChartStorage.oss.internal OSS storage type setting: Internal
|
|
## @param persistence.imageChartStorage.oss.encrypt OSS storage type setting: Encrypt
|
|
## @param persistence.imageChartStorage.oss.secure OSS storage type setting: Secure
|
|
## @param persistence.imageChartStorage.oss.chunksize OSS storage type setting: Chunk size
|
|
## @param persistence.imageChartStorage.oss.rootdirectory OSS storage type setting: Root directory
|
|
## @param persistence.imageChartStorage.oss.secretkey OSS storage type setting: Secret key
|
|
##
|
|
oss:
|
|
accesskeyid: ""
|
|
accesskeysecret: ""
|
|
region: ""
|
|
bucket: ""
|
|
endpoint: ""
|
|
internal: ""
|
|
encrypt: ""
|
|
secure: ""
|
|
chunksize: ""
|
|
rootdirectory: ""
|
|
secretkey: ""
|
|
## @section Tracing parameters
|
|
##
|
|
|
|
## Tracing parameters:
|
|
## tracing: Configure tracing for Harbor, only one of tracing.jaeger.enabled and tracing.otel.enabled should be set
|
|
##
|
|
tracing:
|
|
## @param tracing.enabled Enable tracing
|
|
##
|
|
enabled: false
|
|
## @param tracing.sampleRate Tracing sample rate from 0 to 1
|
|
##
|
|
sampleRate: 1
|
|
## @param tracing.namespace Used to differentiate traces between different harbor services
|
|
##
|
|
namespace: ""
|
|
## @param tracing.attributes A key value dict containing user defined attributes used to initialize the trace provider
|
|
## e.g:
|
|
## attributes:
|
|
## application: harbor
|
|
##
|
|
attributes: {}
|
|
## @extra tracing.jaeger Configuration for exporting to Jaeger. If using Jaeger collector mode, use endpoint, username and password. If using Jaeger agent mode, use agentHost and agentPort.
|
|
## e.g:
|
|
## jaeger:
|
|
## enabled: true
|
|
## endpoint: http://hostname:14268/api/traces
|
|
## username: "jaeger-username"
|
|
## password: "jaeger-password"
|
|
## @param tracing.jaeger.enabled Enable jaeger export
|
|
## @param tracing.jaeger.endpoint Jaeger endpoint
|
|
## @param tracing.jaeger.username Jaeger username
|
|
## @param tracing.jaeger.password Jaeger password
|
|
## @param tracing.jaeger.agentHost Jaeger agent hostname
|
|
## @param tracing.jaeger.agentPort Jaeger agent port
|
|
##
|
|
jaeger:
|
|
enabled: false
|
|
endpoint: ""
|
|
username: ""
|
|
password: ""
|
|
agentHost: ""
|
|
agentPort: ""
|
|
## @extra tracing.otel Configuration for exporting to an otel endpoint
|
|
## @param tracing.otel.enabled Enable otel export
|
|
## @param tracing.otel.endpoint The hostname and port for an otel compatible backend
|
|
## @param tracing.otel.urlpath Url path of otel endpoint
|
|
## @param tracing.otel.compression Enable data compression
|
|
## @param tracing.otel.timeout The timeout for data transfer
|
|
## @param tracing.otel.insecure Ignore cert verification for otel backend
|
|
##
|
|
otel:
|
|
enabled: false
|
|
endpoint: "hostname:4318"
|
|
urlpath: "/v1/traces"
|
|
compression: false
|
|
timeout: 10s
|
|
insecure: true
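## Illustrative OTLP export setup (the collector address below is a placeholder for your own collector Service):
## tracing:
##   enabled: true
##   otel:
##     enabled: true
##     endpoint: "otel-collector.observability.svc.cluster.local:4318"
##     urlpath: "/v1/traces"
##     insecure: true
##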
|
|
## @section Volume Permissions parameters
|
|
##
|
|
|
|
## Init containers parameters:
|
|
## certificateVolume: Copy /etc/ssl/certs to a volume so that they can be updated when a read-only volume is in use.
|
|
##
|
|
certificateVolume:
|
|
## Init container resource requests and limits
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
|
## @param certificateVolume.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if certificateVolume.resources is set (certificateVolume.resources is recommended for production).
|
|
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
|
|
##
|
|
resourcesPreset: "nano"
|
|
## @param certificateVolume.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
|
|
## Example:
|
|
## resources:
|
|
## requests:
|
|
## cpu: 2
|
|
## memory: 512Mi
|
|
## limits:
|
|
## cpu: 3
|
|
## memory: 1024Mi
|
|
##
|
|
resources: {}
|
|
## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
|
|
##
|
|
volumePermissions:
|
|
## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
|
|
##
|
|
enabled: false
|
|
## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry
|
|
## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository
|
|
## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
|
|
## @param volumePermissions.image.digest Init container volume-permissions image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
|
|
## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
|
|
## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
|
|
##
|
|
image:
|
|
registry: docker.io
|
|
repository: bitnami/os-shell
|
|
tag: 12-debian-12-r35
|
|
digest: ""
|
|
pullPolicy: IfNotPresent
|
|
## Optionally specify an array of imagePullSecrets.
|
|
## Secrets must be manually created in the namespace.
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
|
## Example:
|
|
## pullSecrets:
|
|
## - myRegistryKeySecretName
|
|
##
|
|
pullSecrets: []
|
|
## Init container resource requests and limits
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
|
## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
|
|
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
|
|
##
|
|
resourcesPreset: "nano"
|
|
## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
|
|
## Example:
|
|
## resources:
|
|
## requests:
|
|
## cpu: 2
|
|
## memory: 512Mi
|
|
## limits:
|
|
## cpu: 3
|
|
## memory: 1024Mi
|
|
##
|
|
resources: {}
|
|
## Init container's Security Context
|
|
## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
|
|
## and not the below volumePermissions.containerSecurityContext.runAsUser
|
|
## @param volumePermissions.containerSecurityContext.enabled Enable init container Security Context
|
|
## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
|
|
## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
|
|
##
|
|
containerSecurityContext:
|
|
enabled: true
|
|
seLinuxOptions: {}
|
|
runAsUser: 0
|
|
## @section NGINX Parameters
|
|
##
|
|
nginx:
|
|
## Bitnami NGINX image
|
|
## ref: https://hub.docker.com/r/bitnami/nginx/tags/
|
|
## @param nginx.image.registry [default: REGISTRY_NAME] NGINX image registry
|
|
## @param nginx.image.repository [default: REPOSITORY_NAME/nginx] NGINX image repository
|
|
## @skip nginx.image.tag NGINX image tag (immutable tags are recommended)
|
|
## @param nginx.image.digest NGINX image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
|
|
## @param nginx.image.pullPolicy NGINX image pull policy
|
|
## @param nginx.image.pullSecrets NGINX image pull secrets
|
|
## @param nginx.image.debug Enable NGINX image debug mode
|
|
##
|
|
image:
|
|
registry: docker.io
|
|
repository: bitnami/nginx
|
|
tag: 1.27.3-debian-12-r5
|
|
digest: ""
|
|
## Specify an imagePullPolicy
|
|
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
|
|
##
|
|
pullPolicy: IfNotPresent
|
|
## Optionally specify an array of imagePullSecrets.
|
|
## Secrets must be manually created in the namespace.
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
|
## e.g:
|
|
## pullSecrets:
|
|
## - myRegistryKeySecretName
|
|
##
|
|
pullSecrets: []
|
|
## Enable debug mode
|
|
##
|
|
debug: false
|
|
## TLS parameters
|
|
##
|
|
tls:
|
|
## @param nginx.tls.enabled Enable TLS termination
|
|
##
|
|
enabled: true
|
|
## @param nginx.tls.existingSecret Existing secret name containing your own TLS certificates.
|
|
## The secret must contain the keys:
|
|
## `tls.crt` - the certificate (required),
|
|
## `tls.key` - the private key (required),
|
|
## `ca.crt` - CA certificate (optional)
|
|
## Self-signed TLS certificates will be used otherwise.
|
|
##
|
|
existingSecret: ""
|
|
## @param nginx.tls.commonName The common name used to generate the self-signed TLS certificates
|
|
##
|
|
commonName: core.harbor.domain
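## To supply your own certificates instead, one option is (sketch; secret name and file paths are placeholders):
## kubectl create secret tls harbor-nginx-tls --cert=/path/to/tls.crt --key=/path/to/tls.key
## and then set nginx.tls.existingSecret=harbor-nginx-tls
##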
|
|
## @param nginx.behindReverseProxy If NGINX is behind another reverse proxy, set to true
|
|
## if the reverse proxy already provides the 'X-Forwarded-Proto' header field.
|
|
## This is, for example, the case for the OpenShift HAProxy router.
|
|
##
|
|
behindReverseProxy: false
|
|
## @param nginx.command Override default container command (useful when using custom images)
|
|
##
|
|
command: []
|
|
## @param nginx.args Override default container args (useful when using custom images)
|
|
##
|
|
args: []
|
|
## @param nginx.extraEnvVars Array with extra environment variables to add NGINX pods
|
|
##
|
|
extraEnvVars: []
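## e.g. (the variable name and value below are placeholders):
## extraEnvVars:
##   - name: MY_ENV_VAR
##     value: "my-value"
##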
|
|
## @param nginx.extraEnvVarsCM ConfigMap containing extra environment variables for NGINX pods
|
|
##
|
|
extraEnvVarsCM: ""
|
|
## @param nginx.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for NGINX pods
|
|
##
|
|
extraEnvVarsSecret: ""
|
|
## @param nginx.containerPorts.http NGINX HTTP container port
|
|
## @param nginx.containerPorts.https NGINX HTTPS container port
|
|
##
|
|
containerPorts:
|
|
http: 8080
|
|
https: 8443
|
|
## @param nginx.replicaCount Number of NGINX replicas
|
|
##
|
|
replicaCount: 1
|
|
## Configure extra options for NGINX containers' liveness, readiness and startup probes
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
|
|
## @param nginx.livenessProbe.enabled Enable livenessProbe on NGINX containers
|
|
## @param nginx.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
|
|
## @param nginx.livenessProbe.periodSeconds Period seconds for livenessProbe
|
|
## @param nginx.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
|
|
## @param nginx.livenessProbe.failureThreshold Failure threshold for livenessProbe
|
|
## @param nginx.livenessProbe.successThreshold Success threshold for livenessProbe
|
|
##
|
|
livenessProbe:
|
|
enabled: true
|
|
initialDelaySeconds: 20
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 6
|
|
successThreshold: 1
|
|
## @param nginx.readinessProbe.enabled Enable readinessProbe on NGINX containers
|
|
## @param nginx.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
|
|
## @param nginx.readinessProbe.periodSeconds Period seconds for readinessProbe
|
|
## @param nginx.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
|
|
## @param nginx.readinessProbe.failureThreshold Failure threshold for readinessProbe
|
|
## @param nginx.readinessProbe.successThreshold Success threshold for readinessProbe
|
|
##
|
|
readinessProbe:
|
|
enabled: true
|
|
initialDelaySeconds: 20
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 6
|
|
successThreshold: 1
|
|
## @param nginx.startupProbe.enabled Enable startupProbe on NGINX containers
|
|
## @param nginx.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
|
|
## @param nginx.startupProbe.periodSeconds Period seconds for startupProbe
|
|
## @param nginx.startupProbe.timeoutSeconds Timeout seconds for startupProbe
|
|
## @param nginx.startupProbe.failureThreshold Failure threshold for startupProbe
|
|
## @param nginx.startupProbe.successThreshold Success threshold for startupProbe
|
|
##
|
|
startupProbe:
|
|
enabled: false
|
|
initialDelaySeconds: 10
|
|
periodSeconds: 10
|
|
timeoutSeconds: 1
|
|
failureThreshold: 15
|
|
successThreshold: 1
|
|
## @param nginx.customLivenessProbe Custom livenessProbe that overrides the default one
|
|
##
|
|
customLivenessProbe: {}
|
|
## @param nginx.customReadinessProbe Custom readinessProbe that overrides the default one
|
|
##
|
|
customReadinessProbe: {}
|
|
## @param nginx.customStartupProbe Custom startupProbe that overrides the default one
|
|
##
|
|
customStartupProbe: {}
|
|
## NGINX resource requests and limits
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
|
## @param nginx.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if nginx.resources is set (nginx.resources is recommended for production).
|
|
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
|
|
##
|
|
resourcesPreset: "small"
|
|
## @param nginx.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
|
|
## Example:
|
|
## resources:
|
|
## requests:
|
|
## cpu: 2
|
|
## memory: 512Mi
|
|
## limits:
|
|
## cpu: 3
|
|
## memory: 1024Mi
|
|
##
|
|
resources: {}
|
|
## Configure NGINX pods Security Context
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
|
|
## @param nginx.podSecurityContext.enabled Enabled NGINX pods' Security Context
|
|
## @param nginx.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
|
|
## @param nginx.podSecurityContext.sysctls Set kernel settings using the sysctl interface
|
|
## @param nginx.podSecurityContext.supplementalGroups Set filesystem extra groups
|
|
## @param nginx.podSecurityContext.fsGroup Set NGINX pod's Security Context fsGroup
|
|
##
|
|
podSecurityContext:
|
|
enabled: true
|
|
fsGroupChangePolicy: Always
|
|
sysctls: []
|
|
supplementalGroups: []
|
|
fsGroup: 1001
|
|
## Configure NGINX containers (only main one) Security Context
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
|
|
## @param nginx.containerSecurityContext.enabled Enabled containers' Security Context
|
|
## @param nginx.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
|
|
## @param nginx.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
|
|
## @param nginx.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
|
|
## @param nginx.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
|
|
## @param nginx.containerSecurityContext.privileged Set container's Security Context privileged
|
|
## @param nginx.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
|
|
## @param nginx.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
|
|
## @param nginx.containerSecurityContext.capabilities.drop List of capabilities to be dropped
|
|
## @param nginx.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
|
|
##
|
|
containerSecurityContext:
|
|
enabled: true
|
|
seLinuxOptions: {}
|
|
runAsUser: 1001
|
|
runAsGroup: 1001
|
|
runAsNonRoot: true
|
|
privileged: false
|
|
readOnlyRootFilesystem: true
|
|
allowPrivilegeEscalation: false
|
|
capabilities:
|
|
drop: ["ALL"]
|
|
seccompProfile:
|
|
type: "RuntimeDefault"
|
|
## @param nginx.updateStrategy.type NGINX deployment strategy type - only really applicable for deployments with RWO PVs attached
|
|
## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
|
|
## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
|
|
## terminate the single previous pod, so that the new, incoming pod can attach to the PV
|
|
##
|
|
updateStrategy:
|
|
type: RollingUpdate
|
|
## @param nginx.lifecycleHooks LifecycleHook for the NGINX container(s) to automate configuration before or after startup
|
|
##
|
|
lifecycleHooks: {}
|
|
## @param nginx.automountServiceAccountToken Mount Service Account token in pod
|
|
##
|
|
automountServiceAccountToken: false
|
|
## Harbor Nginx ServiceAccount configuration
|
|
##
|
|
serviceAccount:
|
|
## @param nginx.serviceAccount.create Specifies whether a ServiceAccount should be created
|
|
##
|
|
create: false
|
|
## @param nginx.serviceAccount.name The name of the ServiceAccount to use.
|
|
## If not set and create is true, a name is generated using the common.names.fullname template
|
|
##
|
|
name: ""
|
|
## @param nginx.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
|
|
##
|
|
automountServiceAccountToken: false
|
|
## @param nginx.serviceAccount.annotations Additional custom annotations for the ServiceAccount
|
|
##
|
|
annotations: {}
|
|
## @param nginx.hostAliases NGINX pods host aliases
|
|
##
|
|
hostAliases: []
|
|
## @param nginx.podLabels Add additional labels to the NGINX pods (evaluated as a template)
|
|
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
|
|
##
|
|
podLabels: {}
|
|
## @param nginx.podAnnotations Annotations to add to the NGINX pods (evaluated as a template)
|
|
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
|
|
##
|
|
podAnnotations: {}
|
|
## @param nginx.podAffinityPreset NGINX Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
|
|
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
|
##
|
|
podAffinityPreset: ""
|
|
## @param nginx.podAntiAffinityPreset NGINX Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
|
|
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
|
##
|
|
podAntiAffinityPreset: soft
|
|
## Node affinity preset
|
|
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
|
|
## Allowed values: soft, hard
|
|
##
|
|
nodeAffinityPreset:
|
|
## @param nginx.nodeAffinityPreset.type NGINX Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
|
|
##
|
|
type: ""
|
|
## @param nginx.nodeAffinityPreset.key NGINX Node label key to match. Ignored if `affinity` is set.
|
|
## E.g.
|
|
## key: "kubernetes.io/e2e-az-name"
|
|
##
|
|
key: ""
|
|
## @param nginx.nodeAffinityPreset.values NGINX Node label values to match. Ignored if `affinity` is set.
|
|
## E.g.
|
|
## values:
|
|
## - e2e-az1
|
|
## - e2e-az2
|
|
##
|
|
values: []
|
|
## @param nginx.affinity NGINX Affinity for pod assignment
|
|
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
|
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
|
|
##
|
|
affinity: {}
|
|
## @param nginx.nodeSelector NGINX Node labels for pod assignment
|
|
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
|
|
##
|
|
nodeSelector: {}
|
|
## @param nginx.tolerations NGINX Tolerations for pod assignment
|
|
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
|
##
|
|
tolerations: []
|
|
## @param nginx.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
|
|
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
|
|
##
|
|
topologySpreadConstraints: []
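## e.g. a standard Kubernetes constraint spreading NGINX pods across nodes (the label selector below is an
## illustrative assumption, adjust to the labels actually set on the pods):
## topologySpreadConstraints:
##   - maxSkew: 1
##     topologyKey: kubernetes.io/hostname
##     whenUnsatisfiable: ScheduleAnyway
##     labelSelector:
##       matchLabels:
##         app.kubernetes.io/component: nginx
##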
|
|
## @param nginx.priorityClassName Priority Class Name
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
|
|
##
|
|
priorityClassName: ""
|
|
## @param nginx.schedulerName Use an alternate scheduler, e.g. "stork".
|
|
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
|
##
|
|
schedulerName: ""
|
|
## @param nginx.sidecars Add additional sidecar containers to the NGINX pods
|
|
## Example:
|
|
## sidecars:
|
|
## - name: your-image-name
|
|
## image: your-image
|
|
## imagePullPolicy: Always
|
|
## ports:
|
|
## - name: portname
|
|
## containerPort: 1234
|
|
##
|
|
sidecars: []
|
|
## @param nginx.initContainers Add additional init containers to the NGINX pods
|
|
## Example:
|
|
## initContainers:
|
|
## - name: your-image-name
|
|
## image: your-image
|
|
## imagePullPolicy: Always
|
|
## ports:
|
|
## - name: portname
|
|
## containerPort: 1234
|
|
##
|
|
initContainers: []
|
|
## Pod Disruption Budget configuration
|
|
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
|
|
## @param nginx.pdb.create Enable/disable a Pod Disruption Budget creation
|
|
## @param nginx.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
|
|
## @param nginx.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `nginx.pdb.minAvailable` and `nginx.pdb.maxUnavailable` are empty.
|
|
##
|
|
pdb:
|
|
create: true
|
|
minAvailable: ""
|
|
maxUnavailable: ""
|
|
## @param nginx.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the NGINX pods
|
|
##
|
|
extraVolumeMounts: []
|
|
## @param nginx.extraVolumes Optionally specify extra list of additional volumes for the NGINX pods
|
|
##
|
|
extraVolumes: []
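## For reference, extraVolumes and extraVolumeMounts are typically used together, e.g. (the ConfigMap name and
## mount path below are placeholders):
## extraVolumes:
##   - name: custom-config
##     configMap:
##       name: my-nginx-extra-config
## extraVolumeMounts:
##   - name: custom-config
##     mountPath: /opt/extra-config
##     readOnly: true
##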
|
|
## Network Policies
|
|
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
|
|
##
|
|
networkPolicy:
|
|
## @param nginx.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
|
|
##
|
|
enabled: true
|
|
## @param nginx.networkPolicy.allowExternal Don't require server label for connections
|
|
## The Policy model to apply. When set to false, only pods with the correct
|
|
## server label will have network access to the ports server is listening
|
|
## on. When true, server will accept connections from any source
|
|
## (with the correct destination port).
|
|
##
|
|
allowExternal: true
|
|
## @param nginx.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
|
|
##
|
|
allowExternalEgress: true
|
|
## @param nginx.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
|
|
## e.g:
|
|
## extraIngress:
|
|
## - ports:
|
|
## - port: 1234
|
|
## from:
|
|
## - podSelector:
|
|
## - matchLabels:
|
|
## - role: frontend
|
|
## - podSelector:
|
|
## - matchExpressions:
|
|
## - key: role
|
|
## operator: In
|
|
## values:
|
|
## - frontend
|
|
extraIngress: []
|
|
## @param nginx.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
|
|
## e.g:
|
|
## extraEgress:
|
|
## - ports:
|
|
## - port: 1234
|
|
## to:
|
|
## - podSelector:
|
|
## - matchLabels:
|
|
## - role: frontend
|
|
## - podSelector:
|
|
## - matchExpressions:
|
|
## - key: role
|
|
## operator: In
|
|
## values:
|
|
## - frontend
|
|
##
|
|
extraEgress: []
|
|
## @param nginx.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
|
|
## @param nginx.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
|
|
##
|
|
ingressNSMatchLabels: {}
|
|
ingressNSPodMatchLabels: {}
|
|
## @section Harbor Portal Parameters
|
|
##
|
|
portal:
|
|
## Bitnami Harbor Portal image
|
|
## ref: https://hub.docker.com/r/bitnami/harbor-portal/tags/
|
|
## @param portal.image.registry [default: REGISTRY_NAME] Harbor Portal image registry
|
|
## @param portal.image.repository [default: REPOSITORY_NAME/harbor-portal] Harbor Portal image repository
|
|
## @skip portal.image.tag Harbor Portal image tag (immutable tags are recommended)
|
|
## @param portal.image.digest Harbor Portal image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
|
|
## @param portal.image.pullPolicy Harbor Portal image pull policy
|
|
## @param portal.image.pullSecrets Harbor Portal image pull secrets
|
|
## @param portal.image.debug Enable Harbor Portal image debug mode
|
|
##
|
|
image:
|
|
registry: docker.io
|
|
repository: bitnami/harbor-portal
|
|
tag: 2.12.2-debian-12-r0
|
|
digest: ""
|
|
## Specify an imagePullPolicy
|
|
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
|
|
##
|
|
pullPolicy: IfNotPresent
|
|
## Optionally specify an array of imagePullSecrets.
|
|
## Secrets must be manually created in the namespace.
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
|
## e.g:
|
|
## pullSecrets:
|
|
## - myRegistryKeySecretName
|
|
##
|
|
pullSecrets: []
|
|
## Enable debug mode
|
|
##
|
|
debug: false
|
|
## Use TLS in the container
|
|
##
|
|
tls:
|
|
## @param portal.tls.existingSecret Name of an existing secret with the certificates for internal TLS access
|
|
## Requires `internalTLS.enabled` to be set to `true`
|
|
## Self-signed TLS certificates will be used otherwise
|
|
##
|
|
existingSecret: ""
|
|
## @param portal.command Override default container command (useful when using custom images)
|
|
##
|
|
command: []
|
|
## @param portal.args Override default container args (useful when using custom images)
|
|
##
|
|
args: []
|
|
## @param portal.extraEnvVars Array with extra environment variables to add Harbor Portal pods
|
|
##
|
|
extraEnvVars: []
|
|
## @param portal.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Portal pods
|
|
##
|
|
extraEnvVarsCM: ""
|
|
## @param portal.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Portal pods
|
|
##
|
|
extraEnvVarsSecret: ""
|
|
## @param portal.containerPorts.http Harbor Portal HTTP container port
|
|
## @param portal.containerPorts.https Harbor Portal HTTPS container port
|
|
##
|
|
containerPorts:
|
|
http: 8080
|
|
https: 8443
|
|
## @param portal.replicaCount Number of Harbor Portal replicas
|
|
##
|
|
replicaCount: 1
|
|
## Configure extra options for Harbor Portal containers' liveness, readiness and startup probes
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
|
|
## @param portal.livenessProbe.enabled Enable livenessProbe on Harbor Portal containers
|
|
## @param portal.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
|
|
## @param portal.livenessProbe.periodSeconds Period seconds for livenessProbe
|
|
## @param portal.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
|
|
## @param portal.livenessProbe.failureThreshold Failure threshold for livenessProbe
|
|
## @param portal.livenessProbe.successThreshold Success threshold for livenessProbe
|
|
##
|
|
livenessProbe:
|
|
enabled: true
|
|
initialDelaySeconds: 20
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 6
|
|
successThreshold: 1
|
|
## @param portal.readinessProbe.enabled Enable readinessProbe on Harbor Portal containers
|
|
## @param portal.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
|
|
## @param portal.readinessProbe.periodSeconds Period seconds for readinessProbe
|
|
## @param portal.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
|
|
## @param portal.readinessProbe.failureThreshold Failure threshold for readinessProbe
|
|
## @param portal.readinessProbe.successThreshold Success threshold for readinessProbe
|
|
##
|
|
readinessProbe:
|
|
enabled: true
|
|
initialDelaySeconds: 20
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 6
|
|
successThreshold: 1
|
|
## @param portal.startupProbe.enabled Enable startupProbe on Harbor Portal containers
|
|
## @param portal.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
|
|
## @param portal.startupProbe.periodSeconds Period seconds for startupProbe
|
|
## @param portal.startupProbe.timeoutSeconds Timeout seconds for startupProbe
|
|
## @param portal.startupProbe.failureThreshold Failure threshold for startupProbe
|
|
## @param portal.startupProbe.successThreshold Success threshold for startupProbe
|
|
##
|
|
startupProbe:
|
|
enabled: false
|
|
initialDelaySeconds: 5
|
|
periodSeconds: 10
|
|
timeoutSeconds: 1
|
|
failureThreshold: 15
|
|
successThreshold: 1
|
|
## @param portal.customLivenessProbe Custom livenessProbe that overrides the default one
|
|
##
|
|
customLivenessProbe: {}
|
|
## @param portal.customReadinessProbe Custom readinessProbe that overrides the default one
|
|
##
|
|
customReadinessProbe: {}
|
|
## @param portal.customStartupProbe Custom startupProbe that overrides the default one
|
|
##
|
|
customStartupProbe: {}
|
|
## Harbor Portal resource requests and limits
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
|
## @param portal.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if portal.resources is set (portal.resources is recommended for production).
|
|
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
|
|
##
|
|
resourcesPreset: "small"
|
|
## @param portal.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
|
|
## Example:
|
|
## resources:
|
|
## requests:
|
|
## cpu: 2
|
|
## memory: 512Mi
|
|
## limits:
|
|
## cpu: 3
|
|
## memory: 1024Mi
|
|
##
|
|
resources: {}
|
|
## Configure Harbor Portal pods Security Context
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
|
|
## @param portal.podSecurityContext.enabled Enabled Harbor Portal pods' Security Context
|
|
## @param portal.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
|
|
## @param portal.podSecurityContext.sysctls Set kernel settings using the sysctl interface
|
|
## @param portal.podSecurityContext.supplementalGroups Set filesystem extra groups
|
|
## @param portal.podSecurityContext.fsGroup Set Harbor Portal pod's Security Context fsGroup
|
|
##
|
|
podSecurityContext:
|
|
enabled: true
|
|
fsGroupChangePolicy: Always
|
|
sysctls: []
|
|
supplementalGroups: []
|
|
fsGroup: 1001
|
|
## Configure Harbor Portal containers (only main one) Security Context
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
|
|
## @param portal.containerSecurityContext.enabled Enabled containers' Security Context
|
|
## @param portal.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
|
|
## @param portal.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
|
|
## @param portal.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
|
|
## @param portal.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
|
|
## @param portal.containerSecurityContext.privileged Set container's Security Context privileged
|
|
## @param portal.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
|
|
## @param portal.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
|
|
## @param portal.containerSecurityContext.capabilities.drop List of capabilities to be dropped
|
|
## @param portal.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
|
|
##
|
|
containerSecurityContext:
|
|
enabled: true
|
|
seLinuxOptions: {}
|
|
runAsUser: 1001
|
|
runAsGroup: 1001
|
|
runAsNonRoot: true
|
|
privileged: false
|
|
readOnlyRootFilesystem: true
|
|
allowPrivilegeEscalation: false
|
|
capabilities:
|
|
drop: ["ALL"]
|
|
seccompProfile:
|
|
type: "RuntimeDefault"
|
|
## @param portal.updateStrategy.type Harbor Portal deployment strategy type - only really applicable for deployments with RWO PVs attached
|
|
## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
|
|
## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
|
|
## terminate the single previous pod, so that the new, incoming pod can attach to the PV
|
|
##
|
|
updateStrategy:
|
|
type: RollingUpdate
|
|
## @param portal.lifecycleHooks LifecycleHook for the Harbor Portal container(s) to automate configuration before or after startup
|
|
##
|
|
lifecycleHooks: {}
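  ## E.g. a minimal sketch (the postStart command below is only an illustration, not a chart default):
  ## lifecycleHooks:
  ##   postStart:
  ##     exec:
  ##       command: ["/bin/sh", "-c", "echo Portal started"]
  ##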
## @param portal.hostAliases Harbor Portal pods host aliases
|
|
##
|
|
hostAliases: []
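  ## E.g. (the IP address and hostname below are placeholders):
  ## hostAliases:
  ##   - ip: "127.0.0.1"
  ##     hostnames:
  ##       - "foo.local"
  ##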
## @param portal.podLabels Add additional labels to the Harbor Portal pods (evaluated as a template)
|
|
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
|
|
##
|
|
podLabels: {}
|
|
## @param portal.podAnnotations Annotations to add to the Harbor Portal pods (evaluated as a template)
|
|
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
|
|
##
|
|
podAnnotations: {}
|
|
## @param portal.podAffinityPreset Harbor Portal Pod affinity preset. Ignored if `portal.affinity` is set. Allowed values: `soft` or `hard`
|
|
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
|
##
|
|
podAffinityPreset: ""
|
|
## @param portal.podAntiAffinityPreset Harbor Portal Pod anti-affinity preset. Ignored if `portal.affinity` is set. Allowed values: `soft` or `hard`
|
|
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
|
##
|
|
podAntiAffinityPreset: soft
|
|
## Node affinity preset
|
|
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
|
|
##
|
|
nodeAffinityPreset:
|
|
## @param portal.nodeAffinityPreset.type Harbor Portal Node affinity preset type. Ignored if `portal.affinity` is set. Allowed values: `soft` or `hard`
|
|
##
|
|
type: ""
|
|
    ## @param portal.nodeAffinityPreset.key Harbor Portal Node label key to match. Ignored if `portal.affinity` is set.
## E.g.
|
|
## key: "kubernetes.io/e2e-az-name"
|
|
##
|
|
key: ""
|
|
## @param portal.nodeAffinityPreset.values Harbor Portal Node label values to match. Ignored if `portal.affinity` is set.
|
|
## E.g.
|
|
## values:
|
|
## - e2e-az1
|
|
## - e2e-az2
|
|
##
|
|
values: []
|
|
## @param portal.affinity Harbor Portal Affinity for pod assignment
|
|
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
|
## Note: portal.podAffinityPreset, portal.podAntiAffinityPreset, and portal.nodeAffinityPreset will be ignored when it's set
|
|
##
|
|
affinity: {}
|
|
## @param portal.nodeSelector Harbor Portal Node labels for pod assignment
|
|
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
|
|
##
|
|
nodeSelector: {}
|
|
## @param portal.tolerations Harbor Portal Tolerations for pod assignment
|
|
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
|
##
|
|
tolerations: []
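  ## E.g. (the taint key and value below are placeholders):
  ## tolerations:
  ##   - key: "dedicated"
  ##     operator: "Equal"
  ##     value: "harbor"
  ##     effect: "NoSchedule"
  ##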
## @param portal.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
|
|
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
|
|
##
|
|
topologySpreadConstraints: []
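  ## E.g. to spread Portal pods across zones (the label selector below is a placeholder):
  ## topologySpreadConstraints:
  ##   - maxSkew: 1
  ##     topologyKey: topology.kubernetes.io/zone
  ##     whenUnsatisfiable: ScheduleAnyway
  ##     labelSelector:
  ##       matchLabels:
  ##         app.kubernetes.io/component: portal
  ##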
## @param portal.priorityClassName Priority Class Name
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
|
|
##
|
|
priorityClassName: ""
|
|
## @param portal.schedulerName Use an alternate scheduler, e.g. "stork".
|
|
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
|
##
|
|
schedulerName: ""
|
|
## @param portal.sidecars Add additional sidecar containers to the Harbor Portal pods
|
|
## Example:
|
|
## sidecars:
|
|
## - name: your-image-name
|
|
## image: your-image
|
|
## imagePullPolicy: Always
|
|
## ports:
|
|
## - name: portname
|
|
## containerPort: 1234
|
|
##
|
|
sidecars: []
|
|
## @param portal.initContainers Add additional init containers to the Harbor Portal pods
|
|
## Example:
|
|
## initContainers:
|
|
## - name: your-image-name
|
|
## image: your-image
|
|
## imagePullPolicy: Always
|
|
## ports:
|
|
## - name: portname
|
|
## containerPort: 1234
|
|
##
|
|
initContainers: []
|
|
## Pod Disruption Budget configuration
|
|
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
|
|
## @param portal.pdb.create Enable/disable a Pod Disruption Budget creation
|
|
## @param portal.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
|
|
## @param portal.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `portal.pdb.minAvailable` and `portal.pdb.maxUnavailable` are empty.
|
|
##
|
|
pdb:
|
|
create: true
|
|
minAvailable: ""
|
|
maxUnavailable: ""
|
|
## @param portal.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Portal pods
|
|
##
|
|
extraVolumeMounts: []
|
|
## @param portal.extraVolumes Optionally specify extra list of additional volumes for the Harbor Portal pods
|
|
##
|
|
extraVolumes: []
|
|
## @param portal.automountServiceAccountToken Mount Service Account token in pod
|
|
##
|
|
automountServiceAccountToken: false
|
|
## Harbor Portal ServiceAccount configuration
|
|
##
|
|
serviceAccount:
|
|
## @param portal.serviceAccount.create Specifies whether a ServiceAccount should be created
|
|
##
|
|
create: false
|
|
## @param portal.serviceAccount.name The name of the ServiceAccount to use.
|
|
## If not set and create is true, a name is generated using the common.names.fullname template
|
|
##
|
|
name: ""
|
|
## @param portal.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
|
|
##
|
|
automountServiceAccountToken: false
|
|
## @param portal.serviceAccount.annotations Additional custom annotations for the ServiceAccount
|
|
##
|
|
annotations: {}
|
|
## Harbor Portal service configuration
|
|
##
|
|
service:
|
|
## @param portal.service.ports.http Harbor Portal HTTP service port
|
|
## @param portal.service.ports.https Harbor Portal HTTPS service port
|
|
##
|
|
ports:
|
|
http: 80
|
|
https: 443
|
|
## Network Policies
|
|
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
|
|
##
|
|
networkPolicy:
|
|
## @param portal.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
|
|
##
|
|
enabled: true
|
|
## @param portal.networkPolicy.allowExternal Don't require server label for connections
|
|
## The Policy model to apply. When set to false, only pods with the correct
|
|
## server label will have network access to the ports server is listening
|
|
## on. When true, server will accept connections from any source
|
|
## (with the correct destination port).
|
|
##
|
|
allowExternal: true
|
|
## @param portal.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
|
|
##
|
|
allowExternalEgress: true
|
|
## @param portal.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
|
|
## e.g:
|
|
## extraIngress:
|
|
## - ports:
|
|
## - port: 1234
|
|
## from:
|
|
## - podSelector:
|
|
## - matchLabels:
|
|
## - role: frontend
|
|
## - podSelector:
|
|
## - matchExpressions:
|
|
## - key: role
|
|
## operator: In
|
|
## values:
|
|
## - frontend
|
|
extraIngress: []
|
|
    ## @param portal.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
|
|
## extraEgress:
|
|
## - ports:
|
|
## - port: 1234
|
|
## to:
|
|
## - podSelector:
|
|
## - matchLabels:
|
|
## - role: frontend
|
|
## - podSelector:
|
|
## - matchExpressions:
|
|
## - key: role
|
|
## operator: In
|
|
## values:
|
|
## - frontend
|
|
##
|
|
extraEgress: []
|
|
## @param portal.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
|
|
## @param portal.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
|
|
##
|
|
ingressNSMatchLabels: {}
|
|
ingressNSPodMatchLabels: {}
|
|
## @section Harbor Core Parameters
|
|
##
|
|
core:
|
|
## Bitnami Harbor Core image
|
|
## ref: https://hub.docker.com/r/bitnami/harbor-core/tags/
|
|
## @param core.image.registry [default: REGISTRY_NAME] Harbor Core image registry
|
|
## @param core.image.repository [default: REPOSITORY_NAME/harbor-core] Harbor Core image repository
|
|
## @skip core.image.tag Harbor Core image tag (immutable tags are recommended)
|
|
## @param core.image.digest Harbor Core image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
|
|
## @param core.image.pullPolicy Harbor Core image pull policy
|
|
## @param core.image.pullSecrets Harbor Core image pull secrets
|
|
## @param core.image.debug Enable Harbor Core image debug mode
|
|
##
|
|
image:
|
|
registry: docker.io
|
|
repository: bitnami/harbor-core
|
|
tag: 2.12.2-debian-12-r1
|
|
digest: ""
|
|
    ## Specify an imagePullPolicy
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
|
|
##
|
|
pullPolicy: IfNotPresent
|
|
## Optionally specify an array of imagePullSecrets.
|
|
## Secrets must be manually created in the namespace.
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
|
## e.g:
|
|
## pullSecrets:
|
|
## - myRegistryKeySecretName
|
|
##
|
|
pullSecrets: []
|
|
## Enable debug mode
|
|
##
|
|
debug: false
|
|
## @param core.sessionLifetime Explicitly set a session timeout (in seconds) overriding the backend default.
|
|
##
|
|
sessionLifetime: ""
|
|
## @param core.uaaSecret If using external UAA auth which has a self signed cert, you can provide a pre-created secret containing it under the key `ca.crt`.
|
|
##
|
|
uaaSecret: ""
|
|
## @param core.secretKey The key used for encryption. Must be a string of 16 chars
|
|
## e.g:
|
|
## secretKey: "not-a-secure-string"
|
|
##
|
|
secretKey: {{ .Values.globals.harbor.coreSecretKey }}
|
|
## @param core.secret Secret used when the core server communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars.
|
|
##
|
|
secret: {{ .Values.globals.harbor.coreSecret }}
|
|
## @param core.tokenKey Key of the certificate used for token encryption/decryption.
|
|
##
|
|
tokenKey: {{ .Values.globals.harbor.coreTlsKey | quote }}
|
|
## @param core.tokenCert Certificate used for token encryption/decryption.
|
|
##
|
|
tokenCert: {{ .Values.globals.harbor.coreTlsCert | quote }}
|
|
## @param core.secretName Fill the name of a kubernetes secret if you want to use your own TLS certificate and private key for token encryption/decryption. The secret must contain two keys named: `tls.crt` - the certificate and `tls.key` - the private key. The default key pair will be used if it isn't set
|
|
##
|
|
secretName: ""
## @param core.existingSecret Existing secret for core
|
|
## The secret must contain the keys:
|
|
## `secret` (required),
|
|
## `secretKey` (required),
|
|
##
|
|
existingSecret: ""
|
|
## @param core.existingEnvVarsSecret Existing secret for core envvars
|
|
## The secret must contain the keys:
|
|
## `CSRF_KEY` (optional - alternatively auto-generated),
|
|
## `HARBOR_ADMIN_PASSWORD` (optional - alternatively auto-generated),
|
|
## `POSTGRESQL_PASSWORD` (optional - alternatively uses weak upstream default. Read below if you set it. You must also set postgresql.auth.existingSecret to the same value as core.existingEnvVarsSecret for this to work!),
|
|
## `postgres-password` (required if POSTGRESQL_PASSWORD is set & must be the same as POSTGRESQL_PASSWORD.)
|
|
## `HARBOR_DATABASE_PASSWORD` (required if POSTGRESQL_PASSWORD is set & must be the same as POSTGRESQL_PASSWORD.)
|
|
## `REGISTRY_CREDENTIAL_USERNAME` (optional - alternatively weak defaults),
|
|
## `REGISTRY_CREDENTIAL_PASSWORD` (optional - alternatively weak defaults),
|
|
## `_REDIS_URL_CORE` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/0")
|
|
## `_REDIS_URL_REG` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/2")
|
|
##
|
|
## If you do not know how to start, let the chart generate a full secret for you before defining an existingEnvVarsSecret
|
|
## Notes:
|
|
  ## As an EnvVars secret, this secret also stores the Redis configuration URLs
  ## The HARBOR_ADMIN_PASSWORD is only required at the initial deployment; once the password is stored in the database, it is no longer used
##
|
|
existingEnvVarsSecret: ""
## @param core.csrfKey The CSRF key. Will be generated automatically if it isn't specified
|
|
##
|
|
csrfKey: {{ .Values.globals.harbor.coreCsrfKey }}
|
|
## Use TLS in the container
|
|
##
|
|
tls:
|
|
## @param core.tls.existingSecret Name of an existing secret with the certificates for internal TLS access
|
|
    ## Requires `internalTLS.enabled` to be set to `true`
## Self-signed TLS certificates will be used otherwise
|
|
##
|
|
existingSecret: ""
|
|
## @param core.command Override default container command (useful when using custom images)
|
|
##
|
|
command: []
|
|
## @param core.args Override default container args (useful when using custom images)
|
|
##
|
|
args: []
|
|
## @param core.extraEnvVars Array with extra environment variables to add Harbor Core pods
|
|
##
|
|
extraEnvVars: []
|
|
## @param core.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Core pods
|
|
##
|
|
extraEnvVarsCM: ""
|
|
## @param core.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Core pods
|
|
##
|
|
extraEnvVarsSecret: ""
|
|
## @param core.configOverwriteJson String containing a JSON with configuration overrides
|
|
## Source: https://goharbor.io/docs/latest/install-config/configure-user-settings-cli/#harbor-user-settings
|
|
##
|
|
configOverwriteJson: ""
## @param core.configOverwriteJsonSecret Secret containing the JSON configuration overrides
|
|
## Source: https://goharbor.io/docs/latest/install-config/configure-user-settings-cli/#harbor-user-settings
|
|
##
|
|
configOverwriteJsonSecret: ""
|
|
## @param core.containerPorts.http Harbor Core HTTP container port
|
|
## @param core.containerPorts.https Harbor Core HTTPS container port
|
|
## @param core.containerPorts.metrics Harbor Core metrics container port
|
|
##
|
|
containerPorts:
|
|
http: 8080
|
|
https: 8443
|
|
metrics: 8001
|
|
## @param core.replicaCount Number of Harbor Core replicas
|
|
##
|
|
replicaCount: 1
|
|
## Configure extra options for Harbor Core containers' liveness, readiness and startup probes
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
|
|
## @param core.livenessProbe.enabled Enable livenessProbe on Harbor Core containers
|
|
## @param core.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
|
|
## @param core.livenessProbe.periodSeconds Period seconds for livenessProbe
|
|
## @param core.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
|
|
## @param core.livenessProbe.failureThreshold Failure threshold for livenessProbe
|
|
## @param core.livenessProbe.successThreshold Success threshold for livenessProbe
|
|
##
|
|
livenessProbe:
|
|
enabled: true
|
|
initialDelaySeconds: 20
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 6
|
|
successThreshold: 1
|
|
## @param core.readinessProbe.enabled Enable readinessProbe on Harbor Core containers
|
|
## @param core.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
|
|
## @param core.readinessProbe.periodSeconds Period seconds for readinessProbe
|
|
## @param core.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
|
|
## @param core.readinessProbe.failureThreshold Failure threshold for readinessProbe
|
|
## @param core.readinessProbe.successThreshold Success threshold for readinessProbe
|
|
##
|
|
readinessProbe:
|
|
enabled: true
|
|
initialDelaySeconds: 20
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 6
|
|
successThreshold: 1
|
|
## @param core.startupProbe.enabled Enable startupProbe on Harbor Core containers
|
|
## @param core.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
|
|
## @param core.startupProbe.periodSeconds Period seconds for startupProbe
|
|
## @param core.startupProbe.timeoutSeconds Timeout seconds for startupProbe
|
|
## @param core.startupProbe.failureThreshold Failure threshold for startupProbe
|
|
## @param core.startupProbe.successThreshold Success threshold for startupProbe
|
|
##
|
|
startupProbe:
|
|
enabled: false
|
|
initialDelaySeconds: 5
|
|
periodSeconds: 10
|
|
timeoutSeconds: 1
|
|
failureThreshold: 15
|
|
successThreshold: 1
|
|
## @param core.customLivenessProbe Custom livenessProbe that overrides the default one
|
|
##
|
|
customLivenessProbe: {}
|
|
## @param core.customReadinessProbe Custom readinessProbe that overrides the default one
|
|
##
|
|
customReadinessProbe: {}
|
|
## @param core.customStartupProbe Custom startupProbe that overrides the default one
|
|
##
|
|
customStartupProbe: {}
|
|
## Harbor Core resource requests and limits
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
|
## @param core.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if core.resources is set (core.resources is recommended for production).
|
|
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
|
|
##
|
|
resourcesPreset: "small"
|
|
## @param core.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
|
|
## Example:
|
|
## resources:
|
|
## requests:
|
|
## cpu: 2
|
|
## memory: 512Mi
|
|
## limits:
|
|
## cpu: 3
|
|
## memory: 1024Mi
|
|
##
|
|
resources: {}
|
|
## Configure Harbor Core pods Security Context
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
|
|
## @param core.podSecurityContext.enabled Enabled Harbor Core pods' Security Context
|
|
## @param core.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
|
|
## @param core.podSecurityContext.sysctls Set kernel settings using the sysctl interface
|
|
## @param core.podSecurityContext.supplementalGroups Set filesystem extra groups
|
|
## @param core.podSecurityContext.fsGroup Set Harbor Core pod's Security Context fsGroup
|
|
##
|
|
podSecurityContext:
|
|
enabled: true
|
|
fsGroupChangePolicy: Always
|
|
sysctls: []
|
|
supplementalGroups: []
|
|
fsGroup: 1001
|
|
## Configure Harbor Core containers (only main one) Security Context
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
|
|
## @param core.containerSecurityContext.enabled Enabled containers' Security Context
|
|
## @param core.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
|
|
## @param core.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
|
|
## @param core.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
|
|
## @param core.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
|
|
## @param core.containerSecurityContext.privileged Set container's Security Context privileged
|
|
## @param core.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
|
|
## @param core.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
|
|
## @param core.containerSecurityContext.capabilities.drop List of capabilities to be dropped
|
|
## @param core.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
|
|
##
|
|
containerSecurityContext:
|
|
enabled: true
|
|
seLinuxOptions: {}
|
|
runAsUser: 1001
|
|
runAsGroup: 1001
|
|
runAsNonRoot: true
|
|
privileged: false
|
|
readOnlyRootFilesystem: true
|
|
allowPrivilegeEscalation: false
|
|
capabilities:
|
|
drop: ["ALL"]
|
|
seccompProfile:
|
|
type: "RuntimeDefault"
|
|
## @param core.updateStrategy.type Harbor Core deployment strategy type - only really applicable for deployments with RWO PVs attached
|
|
## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
|
|
## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
|
|
## terminate the single previous pod, so that the new, incoming pod can attach to the PV
|
|
##
|
|
updateStrategy:
|
|
type: RollingUpdate
|
|
## @param core.lifecycleHooks LifecycleHook for the Harbor Core container(s) to automate configuration before or after startup
|
|
##
|
|
lifecycleHooks: {}
|
|
## @param core.hostAliases Harbor Core pods host aliases
|
|
##
|
|
hostAliases: []
|
|
## @param core.podLabels Add additional labels to the Harbor Core pods (evaluated as a template)
|
|
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
|
|
##
|
|
podLabels: {}
|
|
## @param core.podAnnotations Annotations to add to the Harbor Core pods (evaluated as a template)
|
|
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
|
|
##
|
|
podAnnotations: {}
|
|
## @param core.podAffinityPreset Harbor Core Pod affinity preset. Ignored if `core.affinity` is set. Allowed values: `soft` or `hard`
|
|
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
|
##
|
|
podAffinityPreset: ""
|
|
## @param core.podAntiAffinityPreset Harbor Core Pod anti-affinity preset. Ignored if `core.affinity` is set. Allowed values: `soft` or `hard`
|
|
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
|
##
|
|
podAntiAffinityPreset: soft
|
|
## Node affinity preset
|
|
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
|
|
##
|
|
nodeAffinityPreset:
|
|
## @param core.nodeAffinityPreset.type Harbor Core Node affinity preset type. Ignored if `core.affinity` is set. Allowed values: `soft` or `hard`
|
|
##
|
|
type: ""
|
|
    ## @param core.nodeAffinityPreset.key Harbor Core Node label key to match. Ignored if `core.affinity` is set.
## E.g.
|
|
## key: "kubernetes.io/e2e-az-name"
|
|
##
|
|
key: ""
|
|
## @param core.nodeAffinityPreset.values Harbor Core Node label values to match. Ignored if `core.affinity` is set.
|
|
## E.g.
|
|
## values:
|
|
## - e2e-az1
|
|
## - e2e-az2
|
|
##
|
|
values: []
|
|
## @param core.affinity Harbor Core Affinity for pod assignment
|
|
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
|
## Note: core.podAffinityPreset, core.podAntiAffinityPreset, and core.nodeAffinityPreset will be ignored when it's set
|
|
##
|
|
affinity: {}
|
|
## @param core.nodeSelector Harbor Core Node labels for pod assignment
|
|
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
|
|
##
|
|
nodeSelector: {}
|
|
## @param core.tolerations Harbor Core Tolerations for pod assignment
|
|
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
|
##
|
|
tolerations: []
|
|
## @param core.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
|
|
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
|
|
##
|
|
topologySpreadConstraints: []
|
|
## @param core.priorityClassName Priority Class Name
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
|
|
##
|
|
priorityClassName: ""
|
|
## @param core.schedulerName Use an alternate scheduler, e.g. "stork".
|
|
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
|
##
|
|
schedulerName: ""
|
|
## @param core.sidecars Add additional sidecar containers to the Harbor Core pods
|
|
## Example:
|
|
## sidecars:
|
|
## - name: your-image-name
|
|
## image: your-image
|
|
## imagePullPolicy: Always
|
|
## ports:
|
|
## - name: portname
|
|
## containerPort: 1234
|
|
##
|
|
sidecars: []
|
|
## @param core.initContainers Add additional init containers to the Harbor Core pods
|
|
## Example:
|
|
## initContainers:
|
|
## - name: your-image-name
|
|
## image: your-image
|
|
## imagePullPolicy: Always
|
|
## ports:
|
|
## - name: portname
|
|
## containerPort: 1234
|
|
##
|
|
initContainers: []
|
|
## Pod Disruption Budget configuration
|
|
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
|
|
## @param core.pdb.create Enable/disable a Pod Disruption Budget creation
|
|
## @param core.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
|
|
## @param core.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `core.pdb.minAvailable` and `core.pdb.maxUnavailable` are empty.
|
|
##
|
|
pdb:
|
|
create: true
|
|
minAvailable: ""
|
|
maxUnavailable: ""
|
|
## @param core.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Core pods
|
|
##
|
|
extraVolumeMounts: []
|
|
## @param core.extraVolumes Optionally specify extra list of additional volumes for the Harbor Core pods
|
|
##
|
|
extraVolumes: []
|
|
## @param core.automountServiceAccountToken Mount Service Account token in pod
|
|
##
|
|
automountServiceAccountToken: false
|
|
## Harbor Core ServiceAccount configuration
|
|
##
|
|
serviceAccount:
|
|
## @param core.serviceAccount.create Specifies whether a ServiceAccount should be created
|
|
##
|
|
create: false
|
|
## @param core.serviceAccount.name The name of the ServiceAccount to use.
|
|
## If not set and create is true, a name is generated using the common.names.fullname template
|
|
##
|
|
name: ""
|
|
## @param core.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
|
|
##
|
|
automountServiceAccountToken: false
|
|
## @param core.serviceAccount.annotations Additional custom annotations for the ServiceAccount
|
|
##
|
|
annotations: {}
|
|
## Harbor Core service configuration
|
|
##
|
|
service:
|
|
## @param core.service.ports.http Harbor Core HTTP service port
|
|
## @param core.service.ports.https Harbor Core HTTPS service port
|
|
## @param core.service.ports.metrics Harbor Core metrics service port
|
|
##
|
|
ports:
|
|
http: 80
|
|
https: 443
|
|
metrics: 8001
|
|
## Network Policies
|
|
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
|
|
##
|
|
networkPolicy:
|
|
## @param core.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
|
|
##
|
|
enabled: true
|
|
## @param core.networkPolicy.allowExternal Don't require server label for connections
|
|
## The Policy model to apply. When set to false, only pods with the correct
|
|
## server label will have network access to the ports server is listening
|
|
## on. When true, server will accept connections from any source
|
|
## (with the correct destination port).
|
|
##
|
|
allowExternal: true
|
|
## @param core.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
|
|
##
|
|
allowExternalEgress: true
|
|
## @param core.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
|
|
## e.g:
|
|
## extraIngress:
|
|
## - ports:
|
|
## - port: 1234
|
|
## from:
|
|
## - podSelector:
|
|
## - matchLabels:
|
|
## - role: frontend
|
|
## - podSelector:
|
|
## - matchExpressions:
|
|
## - key: role
|
|
## operator: In
|
|
## values:
|
|
## - frontend
|
|
extraIngress: []
|
|
    ## @param core.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
|
|
## extraEgress:
|
|
## - ports:
|
|
## - port: 1234
|
|
## to:
|
|
## - podSelector:
|
|
## - matchLabels:
|
|
## - role: frontend
|
|
## - podSelector:
|
|
## - matchExpressions:
|
|
## - key: role
|
|
## operator: In
|
|
## values:
|
|
## - frontend
|
|
##
|
|
extraEgress: []
|
|
## @param core.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
|
|
## @param core.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
|
|
##
|
|
ingressNSMatchLabels: {}
|
|
ingressNSPodMatchLabels: {}
|
|
## @section Harbor Jobservice Parameters
|
|
##
|
|
jobservice:
|
|
## Bitnami Harbor Jobservice image
|
|
## ref: https://hub.docker.com/r/bitnami/harbor-jobservice/tags/
|
|
## @param jobservice.image.registry [default: REGISTRY_NAME] Harbor Jobservice image registry
|
|
## @param jobservice.image.repository [default: REPOSITORY_NAME/harbor-jobservice] Harbor Jobservice image repository
|
|
## @skip jobservice.image.tag Harbor Jobservice image tag (immutable tags are recommended)
|
|
## @param jobservice.image.digest Harbor Jobservice image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
|
|
## @param jobservice.image.pullPolicy Harbor Jobservice image pull policy
|
|
## @param jobservice.image.pullSecrets Harbor Jobservice image pull secrets
|
|
## @param jobservice.image.debug Enable Harbor Jobservice image debug mode
|
|
##
|
|
image:
|
|
registry: docker.io
|
|
repository: bitnami/harbor-jobservice
|
|
tag: 2.12.2-debian-12-r1
|
|
digest: ""
|
|
    ## Specify an imagePullPolicy
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
|
|
##
|
|
pullPolicy: IfNotPresent
|
|
## Optionally specify an array of imagePullSecrets.
|
|
## Secrets must be manually created in the namespace.
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
|
## e.g:
|
|
## pullSecrets:
|
|
## - myRegistryKeySecretName
|
|
##
|
|
pullSecrets: []
|
|
## Enable debug mode
|
|
##
|
|
debug: false
|
|
## @param jobservice.maxJobWorkers The max job workers
|
|
##
|
|
maxJobWorkers: 10
|
|
## @param jobservice.redisNamespace Redis namespace for jobservice
|
|
##
|
|
redisNamespace: harbor_job_service_namespace
|
|
## @param jobservice.jobLogger The logger for jobs: `file`, `database` or `stdout`
|
|
##
|
|
jobLogger: file
|
|
## @param jobservice.secret Secret used when the job service communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars.
|
|
## If a secret key is not specified, Helm will generate one.
|
|
## Must be a string of 16 chars.
|
|
##
|
|
secret: {{ .Values.globals.harbor.jobserviceSecret }}
|
|
## @param jobservice.existingSecret Existing secret for jobservice
|
|
## The secret must contain the keys:
|
|
## `secret` (required),
|
|
##
|
|
existingSecret: ""
|
|
## @param jobservice.existingEnvVarsSecret Existing secret for jobservice envvars
|
|
## The secret must contain the keys:
|
|
## `REGISTRY_CREDENTIAL_PASSWORD` (optional),
|
|
## `JOB_SERVICE_POOL_REDIS_URL` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/1"),
|
|
##
|
|
## If you do not know how to start, let the chart generate a full secret for you before defining an existingEnvVarsSecret
|
|
existingEnvVarsSecret: ""
## Use TLS in the container
|
|
##
|
|
tls:
|
|
## @param jobservice.tls.existingSecret Name of an existing secret with the certificates for internal TLS access
|
|
    ## Requires `internalTLS.enabled` to be set to `true`
## Self-signed TLS certificates will be used otherwise
|
|
##
|
|
existingSecret: ""
|
|
## @param jobservice.command Override default container command (useful when using custom images)
|
|
##
|
|
command: []
|
|
## @param jobservice.args Override default container args (useful when using custom images)
|
|
##
|
|
args: []
|
|
## @param jobservice.extraEnvVars Array with extra environment variables to add Harbor Jobservice pods
|
|
##
|
|
extraEnvVars: []
|
|
## @param jobservice.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Jobservice pods
|
|
##
|
|
extraEnvVarsCM: ""
|
|
## @param jobservice.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Jobservice pods
|
|
##
|
|
extraEnvVarsSecret: ""
|
|
## @param jobservice.containerPorts.http Harbor Jobservice HTTP container port
|
|
## @param jobservice.containerPorts.https Harbor Jobservice HTTPS container port
|
|
## @param jobservice.containerPorts.metrics Harbor Jobservice metrics container port
|
|
##
|
|
containerPorts:
|
|
http: 8080
|
|
https: 8443
|
|
metrics: 8001
|
|
## @param jobservice.replicaCount Number of Harbor Jobservice replicas
|
|
##
|
|
replicaCount: 1
|
|
## Configure extra options for Harbor Jobservice containers' liveness, readiness and startup probes
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
|
|
## @param jobservice.livenessProbe.enabled Enable livenessProbe on Harbor Jobservice containers
|
|
## @param jobservice.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
|
|
## @param jobservice.livenessProbe.periodSeconds Period seconds for livenessProbe
|
|
## @param jobservice.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
|
|
## @param jobservice.livenessProbe.failureThreshold Failure threshold for livenessProbe
|
|
## @param jobservice.livenessProbe.successThreshold Success threshold for livenessProbe
|
|
##
|
|
livenessProbe:
|
|
enabled: true
|
|
initialDelaySeconds: 20
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 6
|
|
successThreshold: 1
|
|
## @param jobservice.readinessProbe.enabled Enable readinessProbe on Harbor Jobservice containers
|
|
## @param jobservice.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
|
|
## @param jobservice.readinessProbe.periodSeconds Period seconds for readinessProbe
|
|
## @param jobservice.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
|
|
## @param jobservice.readinessProbe.failureThreshold Failure threshold for readinessProbe
|
|
## @param jobservice.readinessProbe.successThreshold Success threshold for readinessProbe
|
|
##
|
|
readinessProbe:
|
|
enabled: true
|
|
initialDelaySeconds: 20
|
|
periodSeconds: 10
|
|
timeoutSeconds: 5
|
|
failureThreshold: 6
|
|
successThreshold: 1
|
|
## @param jobservice.startupProbe.enabled Enable startupProbe on Harbor Jobservice containers
|
|
## @param jobservice.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
|
|
## @param jobservice.startupProbe.periodSeconds Period seconds for startupProbe
|
|
## @param jobservice.startupProbe.timeoutSeconds Timeout seconds for startupProbe
|
|
## @param jobservice.startupProbe.failureThreshold Failure threshold for startupProbe
|
|
## @param jobservice.startupProbe.successThreshold Success threshold for startupProbe
|
|
##
|
|
startupProbe:
|
|
enabled: false
|
|
initialDelaySeconds: 5
|
|
periodSeconds: 10
|
|
timeoutSeconds: 1
|
|
failureThreshold: 15
|
|
successThreshold: 1
|
|
## @param jobservice.customLivenessProbe Custom livenessProbe that overrides the default one
|
|
##
|
|
customLivenessProbe: {}
|
|
## @param jobservice.customReadinessProbe Custom readinessProbe that overrides the default one
|
|
##
|
|
customReadinessProbe: {}
|
|
## @param jobservice.customStartupProbe Custom startupProbe that overrides the default one
|
|
##
|
|
customStartupProbe: {}
|
|
## Harbor Jobservice resource requests and limits
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
|
## @param jobservice.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if jobservice.resources is set (jobservice.resources is recommended for production).
|
|
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
|
|
##
|
|
resourcesPreset: "small"
|
|
## @param jobservice.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
|
|
## Example:
|
|
## resources:
|
|
## requests:
|
|
## cpu: 2
|
|
## memory: 512Mi
|
|
## limits:
|
|
## cpu: 3
|
|
## memory: 1024Mi
|
|
##
|
|
resources: {}
|
|
## Configure Harbor Jobservice pods Security Context
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
|
|
## @param jobservice.podSecurityContext.enabled Enabled Harbor Jobservice pods' Security Context
|
|
## @param jobservice.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
|
|
## @param jobservice.podSecurityContext.sysctls Set kernel settings using the sysctl interface
|
|
## @param jobservice.podSecurityContext.supplementalGroups Set filesystem extra groups
|
|
## @param jobservice.podSecurityContext.fsGroup Set Harbor Jobservice pod's Security Context fsGroup
|
|
##
|
|
podSecurityContext:
|
|
enabled: true
|
|
fsGroupChangePolicy: Always
|
|
sysctls: []
|
|
supplementalGroups: []
|
|
fsGroup: 1001
|
|
## Configure Harbor Jobservice containers (only main one) Security Context
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
|
|
## @param jobservice.containerSecurityContext.enabled Enabled containers' Security Context
|
|
## @param jobservice.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
|
|
## @param jobservice.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
|
|
## @param jobservice.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
|
|
## @param jobservice.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
|
|
## @param jobservice.containerSecurityContext.privileged Set container's Security Context privileged
|
|
## @param jobservice.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
|
|
## @param jobservice.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
|
|
## @param jobservice.containerSecurityContext.capabilities.drop List of capabilities to be dropped
|
|
## @param jobservice.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
|
|
##
|
|
containerSecurityContext:
|
|
enabled: true
|
|
seLinuxOptions: {}
|
|
runAsUser: 1001
|
|
runAsGroup: 1001
|
|
runAsNonRoot: true
|
|
privileged: false
|
|
readOnlyRootFilesystem: true
|
|
allowPrivilegeEscalation: false
|
|
capabilities:
|
|
drop: ["ALL"]
|
|
seccompProfile:
|
|
type: "RuntimeDefault"
|
|
## @param jobservice.updateStrategy.type Harbor Jobservice deployment strategy type - only really applicable for deployments with RWO PVs attached
|
|
## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
|
|
## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
|
|
## terminate the single previous pod, so that the new, incoming pod can attach to the PV
|
|
##
|
|
updateStrategy:
|
|
type: RollingUpdate
|
|
## @param jobservice.lifecycleHooks LifecycleHook for the Harbor Jobservice container(s) to automate configuration before or after startup
|
|
##
|
|
lifecycleHooks: {}
|
|
## @param jobservice.hostAliases Harbor Jobservice pods host aliases
|
|
##
|
|
hostAliases: []
|
|
## @param jobservice.podLabels Add additional labels to the Harbor Jobservice pods (evaluated as a template)
|
|
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
|
|
##
|
|
podLabels: {}
|
|
## @param jobservice.podAnnotations Annotations to add to the Harbor Jobservice pods (evaluated as a template)
|
|
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
|
|
##
|
|
podAnnotations: {}
|
|
## @param jobservice.podAffinityPreset Harbor Jobservice Pod affinity preset. Ignored if `jobservice.affinity` is set. Allowed values: `soft` or `hard`
|
|
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
|
##
|
|
podAffinityPreset: ""
|
|
## @param jobservice.podAntiAffinityPreset Harbor Jobservice Pod anti-affinity preset. Ignored if `jobservice.affinity` is set. Allowed values: `soft` or `hard`
|
|
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
|
##
|
|
podAntiAffinityPreset: soft
|
|
## Node affinity preset
|
|
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
|
|
##
|
|
nodeAffinityPreset:
|
|
## @param jobservice.nodeAffinityPreset.type Harbor Jobservice Node affinity preset type. Ignored if `jobservice.affinity` is set. Allowed values: `soft` or `hard`
|
|
##
|
|
type: ""
|
|
    ## @param jobservice.nodeAffinityPreset.key Harbor Jobservice Node label key to match. Ignored if `jobservice.affinity` is set.
## E.g.
|
|
## key: "kubernetes.io/e2e-az-name"
|
|
##
|
|
key: ""
|
|
## @param jobservice.nodeAffinityPreset.values Harbor Jobservice Node label values to match. Ignored if `jobservice.affinity` is set.
|
|
## E.g.
|
|
## values:
|
|
## - e2e-az1
|
|
## - e2e-az2
|
|
##
|
|
values: []
|
|
## @param jobservice.affinity Harbor Jobservice Affinity for pod assignment
|
|
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
|
## Note: jobservice.podAffinityPreset, jobservice.podAntiAffinityPreset, and jobservice.nodeAffinityPreset will be ignored when it's set
|
|
##
|
|
affinity: {}
|
|
## @param jobservice.nodeSelector Harbor Jobservice Node labels for pod assignment
|
|
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
|
|
##
|
|
nodeSelector: {}
|
|
## @param jobservice.tolerations Harbor Jobservice Tolerations for pod assignment
|
|
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
|
##
|
|
tolerations: []
|
|
## @param jobservice.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
|
|
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
|
|
##
|
|
topologySpreadConstraints: []
|
|
## @param jobservice.priorityClassName Priority Class Name
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
|
|
##
|
|
priorityClassName: ""
|
|
## @param jobservice.schedulerName Use an alternate scheduler, e.g. "stork".
|
|
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
|
##
|
|
schedulerName: ""
|
|
## @param jobservice.sidecars Add additional sidecar containers to the Harbor Jobservice pods
|
|
## Example:
|
|
## sidecars:
|
|
## - name: your-image-name
|
|
## image: your-image
|
|
## imagePullPolicy: Always
|
|
## ports:
|
|
## - name: portname
|
|
## containerPort: 1234
|
|
##
|
|
sidecars: []
|
|
## @param jobservice.initContainers Add additional init containers to the Harbor Jobservice pods
|
|
## Example:
|
|
## initContainers:
|
|
## - name: your-image-name
|
|
## image: your-image
|
|
## imagePullPolicy: Always
|
|
## ports:
|
|
## - name: portname
|
|
## containerPort: 1234
|
|
##
|
|
initContainers: []
|
|
## Pod Disruption Budget configuration
|
|
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
|
|
## @param jobservice.pdb.create Enable/disable a Pod Disruption Budget creation
|
|
## @param jobservice.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
|
|
## @param jobservice.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `jobservice.pdb.minAvailable` and `jobservice.pdb.maxUnavailable` are empty.
|
|
##
|
|
pdb:
|
|
create: true
|
|
minAvailable: ""
|
|
maxUnavailable: ""
|
|
## @param jobservice.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Jobservice pods
|
|
##
|
|
extraVolumeMounts: []
|
|
## @param jobservice.extraVolumes Optionally specify extra list of additional volumes for the Harbor Jobservice pods
|
|
##
|
|
extraVolumes: []
|
|
## @param jobservice.automountServiceAccountToken Mount Service Account token in pod
|
|
##
|
|
automountServiceAccountToken: false
|
|
## Harbor Jobservice ServiceAccount configuration
|
|
##
|
|
serviceAccount:
|
|
## @param jobservice.serviceAccount.create Specifies whether a ServiceAccount should be created
|
|
##
|
|
create: false
|
|
## @param jobservice.serviceAccount.name The name of the ServiceAccount to use.
|
|
## If not set and create is true, a name is generated using the common.names.fullname template
|
|
##
|
|
name: ""
|
|
## @param jobservice.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
|
|
##
|
|
automountServiceAccountToken: false
|
|
## @param jobservice.serviceAccount.annotations Additional custom annotations for the ServiceAccount
|
|
##
|
|
annotations: {}
|
|
## Harbor Jobservice service configuration
|
|
##
|
|
service:
|
|
## @param jobservice.service.ports.http Harbor Jobservice HTTP service port
|
|
## @param jobservice.service.ports.https Harbor Jobservice HTTPS service port
|
|
    ## @param jobservice.service.ports.metrics Harbor Jobservice metrics service port
##
|
|
ports:
|
|
http: 80
|
|
https: 443
|
|
metrics: 8001
|
|
## Network Policies
|
|
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
|
|
##
|
|
networkPolicy:
|
|
## @param jobservice.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
|
|
##
|
|
enabled: true
|
|
## @param jobservice.networkPolicy.allowExternal Don't require server label for connections
|
|
## The Policy model to apply. When set to false, only pods with the correct
|
|
## server label will have network access to the ports server is listening
|
|
## on. When true, server will accept connections from any source
|
|
## (with the correct destination port).
|
|
##
|
|
allowExternal: true
|
|
## @param jobservice.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
|
|
##
|
|
allowExternalEgress: true
|
|
## @param jobservice.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
|
|
## e.g:
|
|
## extraIngress:
|
|
## - ports:
|
|
## - port: 1234
|
|
## from:
|
|
## - podSelector:
|
|
## - matchLabels:
|
|
## - role: frontend
|
|
## - podSelector:
|
|
## - matchExpressions:
|
|
## - key: role
|
|
## operator: In
|
|
## values:
|
|
## - frontend
|
|
extraIngress: []
|
|
    ## @param jobservice.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
|
|
## extraEgress:
|
|
## - ports:
|
|
## - port: 1234
|
|
## to:
|
|
## - podSelector:
|
|
## - matchLabels:
|
|
## - role: frontend
|
|
## - podSelector:
|
|
## - matchExpressions:
|
|
## - key: role
|
|
## operator: In
|
|
## values:
|
|
## - frontend
|
|
##
|
|
extraEgress: []
|
|
## @param jobservice.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
|
|
## @param jobservice.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
|
|
##
|
|
ingressNSMatchLabels: {}
|
|
ingressNSPodMatchLabels: {}
|
|
## @section Harbor Registry Parameters
##

## Registry Parameters
##
registry:
  ## @param registry.secret Secret is used to secure the upload state from client and registry storage backend. See: <https://github.com/docker/distribution/blob/master/docs/configuration.md>
  ## See: https://github.com/docker/distribution/blob/master/docs/configuration.md#http
  ## If a secret key is not specified, Helm will generate one.
  ## Must be a string of 16 chars.
##
|
|
secret: {{ .Values.globals.harbor.registrySecret }}
|
|
## @param registry.existingSecret Existing secret for registry
|
|
## The secret must contain the keys:
|
|
## `REGISTRY_HTPASSWD` (required - replaces insecure defaults),
|
|
  ## `REGISTRY_HTTP_SECRET` (optional - generated on the fly if not specified),
## `REGISTRY_REDIS_PASSWORD` (optional),
|
|
##
|
|
existingSecret: ""
|
|
## @param registry.relativeurls Make the registry return relative URLs in Location headers. The client is responsible for resolving the correct URL.
|
|
##
|
|
relativeurls: false
|
|
  ## @param registry.credentials.username The username for accessing the registry instance, which is hosted by htpasswd auth mode. For more details see the [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd)
  ## @param registry.credentials.password The password for accessing the registry instance, which is hosted by htpasswd auth mode. For more details see the [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). It is suggested you update this value before installation.
  ## @param registry.credentials.htpasswd The content of the htpasswd file based on the values of `registry.credentials.username` and `registry.credentials.password`. Currently `helm` does not support bcrypt in the template script, so if the credentials are updated you need to regenerate this value manually with the `htpasswd` CLI (see the example below)
##
|
|
credentials:
|
|
username: {{ .Values.globals.harbor.username }}
|
|
password: {{ .Values.globals.harbor.password }}
|
|
    ## If you update the username or password of the registry, make sure to use the `htpasswd` CLI tool to generate the bcrypt hash
## e.g. "htpasswd -nbBC10 $username $password"
|
|
##
|
|
htpasswd: {{ .Values.globals.harbor.htpasswd }}
|
|
middleware:
|
|
    ## @param registry.middleware.enabled Middleware is used to add support for a CDN between backend storage and `docker pull` recipient.
##
|
|
enabled: false
|
|
## @param registry.middleware.type CDN type for the middleware
|
|
##
|
|
type: cloudFront
|
|
## @param registry.middleware.cloudFront.baseurl CloudFront CDN settings: Base URL
|
|
## @param registry.middleware.cloudFront.keypairid CloudFront CDN settings: Keypair ID
|
|
## @param registry.middleware.cloudFront.duration CloudFront CDN settings: Duration
|
|
## @param registry.middleware.cloudFront.ipfilteredby CloudFront CDN settings: IP filters
|
|
## @param registry.middleware.cloudFront.privateKeySecret CloudFront CDN settings: Secret name with the private key
|
|
##
|
|
cloudFront:
|
|
baseurl: example.cloudfront.net
|
|
keypairid: KEYPAIRID
|
|
duration: 3000s
|
|
ipfilteredby: none
|
|
## The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key
|
|
## that allows access to CloudFront
|
|
##
|
|
privateKeySecret: my-secret
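      ## E.g. the secret can be created from a local PEM file (the file name below is only an example):
      ##   kubectl create secret generic my-secret --from-file=CLOUDFRONT_KEY_DATA=./cloudfront-private-key.pem
      ##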
## Use TLS in the container
|
|
##
|
|
tls:
|
|
## @param registry.tls.existingSecret Name of an existing secret with the certificates for internal TLS access
|
|
    ## Requires `internalTLS.enabled` to be set to `true`
## Self-signed TLS certificates will be used otherwise
|
|
##
|
|
existingSecret: ""
|
|
## @param registry.replicaCount Number of Harbor Registry replicas
|
|
##
|
|
replicaCount: 1
|
|
## Configure Harbor Registry pods Security Context
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
|
|
## @param registry.podSecurityContext.enabled Enabled Harbor Registry pods' Security Context
|
|
## @param registry.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
|
|
## @param registry.podSecurityContext.sysctls Set kernel settings using the sysctl interface
|
|
## @param registry.podSecurityContext.supplementalGroups Set filesystem extra groups
|
|
## @param registry.podSecurityContext.fsGroup Set Harbor Registry pod's Security Context fsGroup
|
|
##
|
|
podSecurityContext:
|
|
enabled: true
|
|
fsGroupChangePolicy: Always
|
|
sysctls: []
|
|
supplementalGroups: []
|
|
fsGroup: 1001
|
|
## @param registry.updateStrategy.type Harbor Registry deployment strategy type - only really applicable for deployments with RWO PVs attached
|
|
## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
|
|
## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
|
|
## terminate the single previous pod, so that the new, incoming pod can attach to the PV
|
|
##
|
|
updateStrategy:
|
|
type: RollingUpdate
|
|
## @param registry.hostAliases Harbor Registry pods host aliases
|
|
##
|
|
hostAliases: []
|
|
## @param registry.podLabels Add additional labels to the Harbor Registry pods (evaluated as a template)
|
|
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
|
|
##
|
|
podLabels: {}
|
|
## @param registry.podAnnotations Annotations to add to the Harbor Registry pods (evaluated as a template)
|
|
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
|
|
##
|
|
podAnnotations: {}
|
|
## @param registry.podAffinityPreset Harbor Registry Pod affinity preset. Ignored if `registry.affinity` is set. Allowed values: `soft` or `hard`
|
|
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
|
##
|
|
podAffinityPreset: ""
|
|
## @param registry.podAntiAffinityPreset Harbor Registry Pod anti-affinity preset. Ignored if `registry.affinity` is set. Allowed values: `soft` or `hard`
|
|
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
|
##
|
|
podAntiAffinityPreset: soft
|
|
## Node affinity preset
|
|
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
|
|
##
|
|
nodeAffinityPreset:
|
|
## @param registry.nodeAffinityPreset.type Harbor Registry Node affinity preset type. Ignored if `registry.affinity` is set. Allowed values: `soft` or `hard`
|
|
##
|
|
type: ""
|
|
    ## @param registry.nodeAffinityPreset.key Harbor Registry Node label key to match. Ignored if `registry.affinity` is set.
## E.g.
|
|
## key: "kubernetes.io/e2e-az-name"
|
|
##
|
|
key: ""
|
|
    ## @param registry.nodeAffinityPreset.values Harbor Registry Node label values to match. Ignored if `registry.affinity` is set.
    ## E.g.
    ## values:
    ##   - e2e-az1
    ##   - e2e-az2
    ##
    values: []
  ## @param registry.affinity Harbor Registry Affinity for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## Note: registry.podAffinityPreset, registry.podAntiAffinityPreset, and registry.nodeAffinityPreset will be ignored when it's set
  ##
  affinity: {}
  ## @param registry.nodeSelector Harbor Registry Node labels for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
  ##
  nodeSelector: {}
  ## @param registry.tolerations Harbor Registry Tolerations for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param registry.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
  ##
  topologySpreadConstraints: []
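  ## A minimal sketch of a zone-spread constraint; the labelSelector below is illustrative and
  ## must be adjusted to match the labels of your Registry pods:
  ## topologySpreadConstraints:
  ##   - maxSkew: 1
  ##     topologyKey: topology.kubernetes.io/zone
  ##     whenUnsatisfiable: ScheduleAnyway
  ##     labelSelector:
  ##       matchLabels:
  ##         app.kubernetes.io/component: registry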
  ## @param registry.priorityClassName Priority Class Name
  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
  ##
  priorityClassName: ""
  ## @param registry.schedulerName Use an alternate scheduler, e.g. "stork".
  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
  ##
  schedulerName: ""
  ## @param registry.sidecars Add additional sidecar containers to the Harbor Registry pods
  ## Example:
  ## sidecars:
  ##   - name: your-image-name
  ##     image: your-image
  ##     imagePullPolicy: Always
  ##     ports:
  ##       - name: portname
  ##         containerPort: 1234
  ##
  sidecars: []
  ## @param registry.initContainers Add additional init containers to the Harbor Registry pods
  ## Example:
  ## initContainers:
  ##   - name: your-image-name
  ##     image: your-image
  ##     imagePullPolicy: Always
  ##     ports:
  ##       - name: portname
  ##         containerPort: 1234
  ##
  initContainers: []
  ## Pod Disruption Budget configuration
  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
  ## @param registry.pdb.create Enable/disable a Pod Disruption Budget creation
  ## @param registry.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
  ## @param registry.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `registry.pdb.minAvailable` and `registry.pdb.maxUnavailable` are empty.
  ##
  pdb:
    create: true
    minAvailable: ""
    maxUnavailable: ""
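  ## e.g. to always keep at least one Registry pod available during voluntary disruptions
  ## (illustrative, only meaningful with more than one replica):
  ## pdb:
  ##   create: true
  ##   minAvailable: 1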
  ## @param registry.extraVolumes Optionally specify extra list of additional volumes for the Harbor Registry pods
  ##
  extraVolumes: []
  ## @param registry.automountServiceAccountToken Mount Service Account token in pod
  ##
  automountServiceAccountToken: false
  ## Harbor Registry ServiceAccount configuration
  ##
  serviceAccount:
    ## @param registry.serviceAccount.create Specifies whether a ServiceAccount should be created
    ##
    create: true
    ## @param registry.serviceAccount.name The name of the ServiceAccount to use.
    ## If not set and create is true, a name is generated using the common.names.fullname template
    ##
    name: ""
    ## @param registry.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
    ##
    automountServiceAccountToken: false
    ## @param registry.serviceAccount.annotations Additional custom annotations for the ServiceAccount
    ##
    annotations: {}
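    ## e.g. to attach cloud IAM metadata to the generated ServiceAccount (annotation key and
    ## value are illustrative only):
    ## annotations:
    ##   eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/harbor-registry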
  ## Network Policies
  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
  ##
  networkPolicy:
    ## @param registry.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
    ##
    enabled: true
    ## @param registry.networkPolicy.allowExternal Don't require server label for connections
    ## The Policy model to apply. When set to false, only pods with the correct
    ## server label will have network access to the ports server is listening
    ## on. When true, server will accept connections from any source
    ## (with the correct destination port).
    ##
    allowExternal: true
    ## @param registry.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
    ##
    allowExternalEgress: true
    ## @param registry.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
    ## e.g:
    ## extraIngress:
    ##   - ports:
    ##       - port: 1234
    ##     from:
    ##       - podSelector:
    ##           - matchLabels:
    ##               - role: frontend
    ##       - podSelector:
    ##           - matchExpressions:
    ##               - key: role
    ##                 operator: In
    ##                 values:
    ##                   - frontend
    extraIngress: []
    ## @param registry.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
    ## e.g:
    ## extraEgress:
    ##   - ports:
    ##       - port: 1234
    ##     to:
    ##       - podSelector:
    ##           - matchLabels:
    ##               - role: frontend
    ##       - podSelector:
    ##           - matchExpressions:
    ##               - key: role
    ##                 operator: In
    ##                 values:
    ##                   - frontend
    ##
    extraEgress: []
    ## @param registry.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
    ## @param registry.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
    ##
    ingressNSMatchLabels: {}
    ingressNSPodMatchLabels: {}
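    ## e.g. to only accept traffic from Prometheus pods running in a "monitoring" namespace
    ## (the labels shown are illustrative and must match your environment):
    ## ingressNSMatchLabels:
    ##   kubernetes.io/metadata.name: monitoring
    ## ingressNSPodMatchLabels:
    ##   app.kubernetes.io/name: prometheus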
  ## Harbor Registry main container parameters
  ##
  server:
    ## Bitnami Harbor Registry image
    ## ref: https://hub.docker.com/r/bitnami/harbor-registry/tags/
    ## @param registry.server.image.registry [default: REGISTRY_NAME] Harbor Registry image registry
    ## @param registry.server.image.repository [default: REPOSITORY_NAME/harbor-registry] Harbor Registry image repository
    ## @skip registry.server.image.tag Harbor Registry image tag (immutable tags are recommended)
    ## @param registry.server.image.digest Harbor Registry image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
    ## @param registry.server.image.pullPolicy Harbor Registry image pull policy
    ## @param registry.server.image.pullSecrets Harbor Registry image pull secrets
    ## @param registry.server.image.debug Enable Harbor Registry image debug mode
    ##
    image:
      registry: docker.io
      repository: bitnami/harbor-registry
      tag: 2.12.2-debian-12-r1
      digest: ""
      ## Specify an imagePullPolicy
      ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
      ##
      pullPolicy: IfNotPresent
      ## Optionally specify an array of imagePullSecrets.
      ## Secrets must be manually created in the namespace.
      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
      ## e.g:
      ## pullSecrets:
      ##   - myRegistryKeySecretName
      ##
      pullSecrets: []
      ## Enable debug mode
      ##
      debug: false
    ## @param registry.server.command Override default container command (useful when using custom images)
    ##
    command: []
    ## @param registry.server.args Override default container args (useful when using custom images)
    ##
    args: []
    ## @param registry.server.extraEnvVars Array with extra environment variables to add Harbor Registry main containers
    ##
    extraEnvVars: []
    ## @param registry.server.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Registry main containers
    ##
    extraEnvVarsCM: ""
    ## @param registry.server.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Registry main containers
    ##
    extraEnvVarsSecret: ""
    ## @param registry.server.containerPorts.http Harbor Registry HTTP container port
    ## @param registry.server.containerPorts.https Harbor Registry HTTPS container port
    ## @param registry.server.containerPorts.debug Harbor Registry debug container port
    ## @param registry.server.containerPorts.metrics Harbor Registry metrics container port
    ##
    containerPorts:
      http: 5000
      https: 5443
      debug: 5001
      metrics: 8001
    ## Configure extra options for Harbor Registry main containers' liveness, readiness and startup probes
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
    ## @param registry.server.livenessProbe.enabled Enable livenessProbe on Harbor Registry main containers
    ## @param registry.server.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
    ## @param registry.server.livenessProbe.periodSeconds Period seconds for livenessProbe
    ## @param registry.server.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
    ## @param registry.server.livenessProbe.failureThreshold Failure threshold for livenessProbe
    ## @param registry.server.livenessProbe.successThreshold Success threshold for livenessProbe
    ##
    livenessProbe:
      enabled: true
      initialDelaySeconds: 20
      periodSeconds: 10
      timeoutSeconds: 5
      failureThreshold: 6
      successThreshold: 1
    ## @param registry.server.readinessProbe.enabled Enable readinessProbe on Harbor Registry main containers
    ## @param registry.server.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
    ## @param registry.server.readinessProbe.periodSeconds Period seconds for readinessProbe
    ## @param registry.server.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
    ## @param registry.server.readinessProbe.failureThreshold Failure threshold for readinessProbe
    ## @param registry.server.readinessProbe.successThreshold Success threshold for readinessProbe
    ##
    readinessProbe:
      enabled: true
      initialDelaySeconds: 20
      periodSeconds: 10
      timeoutSeconds: 5
      failureThreshold: 6
      successThreshold: 1
    ## @param registry.server.startupProbe.enabled Enable startupProbe on Harbor Registry main containers
    ## @param registry.server.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
    ## @param registry.server.startupProbe.periodSeconds Period seconds for startupProbe
    ## @param registry.server.startupProbe.timeoutSeconds Timeout seconds for startupProbe
    ## @param registry.server.startupProbe.failureThreshold Failure threshold for startupProbe
    ## @param registry.server.startupProbe.successThreshold Success threshold for startupProbe
    ##
    startupProbe:
      enabled: false
      initialDelaySeconds: 5
      periodSeconds: 10
      timeoutSeconds: 1
      failureThreshold: 15
      successThreshold: 1
    ## @param registry.server.customLivenessProbe Custom livenessProbe that overrides the default one
    ##
    customLivenessProbe: {}
    ## @param registry.server.customReadinessProbe Custom readinessProbe that overrides the default one
    ##
    customReadinessProbe: {}
    ## @param registry.server.customStartupProbe Custom startupProbe that overrides the default one
    ##
    customStartupProbe: {}
    ## Harbor Registry main resource requests and limits
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
    ## @param registry.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if registry.server.resources is set (registry.server.resources is recommended for production).
    ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
    ##
    resourcesPreset: "small"
    ## @param registry.server.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
    ## Example:
    ## resources:
    ##   requests:
    ##     cpu: 2
    ##     memory: 512Mi
    ##   limits:
    ##     cpu: 3
    ##     memory: 1024Mi
    ##
    resources: {}
    ## Configure Harbor Registry main containers (only main one) Security Context
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
    ## @param registry.server.containerSecurityContext.enabled Enabled containers' Security Context
    ## @param registry.server.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
    ## @param registry.server.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
    ## @param registry.server.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
    ## @param registry.server.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
    ## @param registry.server.containerSecurityContext.privileged Set container's Security Context privileged
    ## @param registry.server.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
    ## @param registry.server.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
    ## @param registry.server.containerSecurityContext.capabilities.drop List of capabilities to be dropped
    ## @param registry.server.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
    ##
    containerSecurityContext:
      enabled: true
      seLinuxOptions: {}
      runAsUser: 1001
      runAsGroup: 1001
      runAsNonRoot: true
      privileged: false
      readOnlyRootFilesystem: true
      allowPrivilegeEscalation: false
      capabilities:
        drop: ["ALL"]
      seccompProfile:
        type: "RuntimeDefault"
    ## @param registry.server.lifecycleHooks LifecycleHook for the Harbor Registry main container(s) to automate configuration before or after startup
    ##
    lifecycleHooks: {}
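    ## A minimal sketch of a postStart hook (the command shown is illustrative only):
    ## lifecycleHooks:
    ##   postStart:
    ##     exec:
    ##       command: ["/bin/sh", "-c", "echo 'registry container started'"]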
    ## @param registry.server.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Registry main pods
    ##
    extraVolumeMounts: []
    ## Harbor Registry service configuration
    ##
    service:
      ## @param registry.server.service.ports.http Harbor Registry HTTP service port
      ## @param registry.server.service.ports.https Harbor Registry HTTPS service port
      ## @param registry.server.service.ports.metrics Harbor Registry metrics service port
      ##
      ports:
        http: 5000
        https: 5443
        metrics: 8001
  ## Harbor Registryctl parameters
  ##
  controller:
    ## Bitnami Harbor Registryctl image
    ## ref: https://hub.docker.com/r/bitnami/harbor-registryctl/tags/
    ## @param registry.controller.image.registry [default: REGISTRY_NAME] Harbor Registryctl image registry
    ## @param registry.controller.image.repository [default: REPOSITORY_NAME/harbor-registryctl] Harbor Registryctl image repository
    ## @skip registry.controller.image.tag Harbor Registryctl image tag (immutable tags are recommended)
    ## @param registry.controller.image.digest Harbor Registryctl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
    ## @param registry.controller.image.pullPolicy Harbor Registryctl image pull policy
    ## @param registry.controller.image.pullSecrets Harbor Registryctl image pull secrets
    ## @param registry.controller.image.debug Enable Harbor Registryctl image debug mode
    ##
    image:
      registry: docker.io
      repository: bitnami/harbor-registryctl
      tag: 2.12.2-debian-12-r1
      digest: ""
      ## Specify an imagePullPolicy
      ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
      ##
      pullPolicy: IfNotPresent
      ## Optionally specify an array of imagePullSecrets.
      ## Secrets must be manually created in the namespace.
      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
      ## e.g:
      ## pullSecrets:
      ##   - myRegistryKeySecretName
      ##
      pullSecrets: []
      ## Enable debug mode
      ##
      debug: false
    ## @param registry.controller.command Override default container command (useful when using custom images)
    ##
    command: []
    ## @param registry.controller.args Override default container args (useful when using custom images)
    ##
    args: []
    ## @param registry.controller.extraEnvVars Array with extra environment variables to add Harbor Registryctl containers
    ##
    extraEnvVars: []
    ## @param registry.controller.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Registryctl containers
    ##
    extraEnvVarsCM: ""
    ## @param registry.controller.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Registryctl containers
    ##
    extraEnvVarsSecret: ""
    ## @param registry.controller.containerPorts.http Harbor Registryctl HTTP container port
    ## @param registry.controller.containerPorts.https Harbor Registryctl HTTPS container port
    ##
    containerPorts:
      http: 8080
      https: 8443
    ## Configure extra options for Harbor Registryctl containers' liveness, readiness and startup probes
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
    ## @param registry.controller.livenessProbe.enabled Enable livenessProbe on Harbor Registryctl containers
    ## @param registry.controller.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
    ## @param registry.controller.livenessProbe.periodSeconds Period seconds for livenessProbe
    ## @param registry.controller.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
    ## @param registry.controller.livenessProbe.failureThreshold Failure threshold for livenessProbe
    ## @param registry.controller.livenessProbe.successThreshold Success threshold for livenessProbe
    ##
    livenessProbe:
      enabled: true
      initialDelaySeconds: 20
      periodSeconds: 10
      timeoutSeconds: 5
      failureThreshold: 6
      successThreshold: 1
    ## @param registry.controller.readinessProbe.enabled Enable readinessProbe on Harbor Registryctl containers
    ## @param registry.controller.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
    ## @param registry.controller.readinessProbe.periodSeconds Period seconds for readinessProbe
    ## @param registry.controller.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
    ## @param registry.controller.readinessProbe.failureThreshold Failure threshold for readinessProbe
    ## @param registry.controller.readinessProbe.successThreshold Success threshold for readinessProbe
    ##
    readinessProbe:
      enabled: true
      initialDelaySeconds: 20
      periodSeconds: 10
      timeoutSeconds: 5
      failureThreshold: 6
      successThreshold: 1
    ## @param registry.controller.startupProbe.enabled Enable startupProbe on Harbor Registryctl containers
    ## @param registry.controller.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
    ## @param registry.controller.startupProbe.periodSeconds Period seconds for startupProbe
    ## @param registry.controller.startupProbe.timeoutSeconds Timeout seconds for startupProbe
    ## @param registry.controller.startupProbe.failureThreshold Failure threshold for startupProbe
    ## @param registry.controller.startupProbe.successThreshold Success threshold for startupProbe
    ##
    startupProbe:
      enabled: false
      initialDelaySeconds: 5
      periodSeconds: 10
      timeoutSeconds: 1
      failureThreshold: 15
      successThreshold: 1
    ## @param registry.controller.customLivenessProbe Custom livenessProbe that overrides the default one
    ##
    customLivenessProbe: {}
    ## @param registry.controller.customReadinessProbe Custom readinessProbe that overrides the default one
    ##
    customReadinessProbe: {}
    ## @param registry.controller.customStartupProbe Custom startupProbe that overrides the default one
    ##
    customStartupProbe: {}
    ## Harbor Registryctl resource requests and limits
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
    ## @param registry.controller.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if registry.controller.resources is set (registry.controller.resources is recommended for production).
    ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
    ##
    resourcesPreset: "small"
    ## @param registry.controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
    ## Example:
    ## resources:
    ##   requests:
    ##     cpu: 2
    ##     memory: 512Mi
    ##   limits:
    ##     cpu: 3
    ##     memory: 1024Mi
    ##
    resources: {}
    ## Configure Harbor Registryctl containers (only main one) Security Context
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
    ## @param registry.controller.containerSecurityContext.enabled Enabled containers' Security Context
    ## @param registry.controller.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
    ## @param registry.controller.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
    ## @param registry.controller.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
    ## @param registry.controller.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
    ## @param registry.controller.containerSecurityContext.privileged Set container's Security Context privileged
    ## @param registry.controller.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
    ## @param registry.controller.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
    ## @param registry.controller.containerSecurityContext.capabilities.drop List of capabilities to be dropped
    ## @param registry.controller.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
    ##
    containerSecurityContext:
      enabled: true
      seLinuxOptions: {}
      runAsUser: 1001
      runAsGroup: 1001
      runAsNonRoot: true
      privileged: false
      readOnlyRootFilesystem: true
      allowPrivilegeEscalation: false
      capabilities:
        drop: ["ALL"]
      seccompProfile:
        type: "RuntimeDefault"
    ## @param registry.controller.lifecycleHooks LifecycleHook for the Harbor Registryctl container(s) to automate configuration before or after startup
    ##
    lifecycleHooks: {}
    ## @param registry.controller.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Registryctl pods
    ##
    extraVolumeMounts: []
    ## Harbor Registryctl service configuration
    ##
    service:
      ## @param registry.controller.service.ports.http Harbor Registryctl HTTP service port
      ## @param registry.controller.service.ports.https Harbor Registryctl HTTPS service port
      ##
      ports:
        http: 8080
        https: 8443
## @section Harbor Adapter Trivy Parameters
##
trivy:
  ## Bitnami Harbor Adapter Trivy image
  ## ref: https://hub.docker.com/r/bitnami/harbor-adapter-trivy/tags/
  ## @param trivy.image.registry [default: REGISTRY_NAME] Harbor Adapter Trivy image registry
  ## @param trivy.image.repository [default: REPOSITORY_NAME/harbor-adapter-trivy] Harbor Adapter Trivy image repository
  ## @skip trivy.image.tag Harbor Adapter Trivy image tag (immutable tags are recommended)
  ## @param trivy.image.digest Harbor Adapter Trivy image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
  ## @param trivy.image.pullPolicy Harbor Adapter Trivy image pull policy
  ## @param trivy.image.pullSecrets Harbor Adapter Trivy image pull secrets
  ## @param trivy.image.debug Enable Harbor Adapter Trivy image debug mode
  ##
  image:
    registry: docker.io
    repository: bitnami/harbor-adapter-trivy
    tag: 2.12.2-debian-12-r0
    digest: ""
    ## Specify an imagePullPolicy
    ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
    ##
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ## e.g:
    ## pullSecrets:
    ##   - myRegistryKeySecretName
    ##
    pullSecrets: []
    ## Enable debug mode
    ##
    debug: false
  ## @param trivy.enabled Enable Trivy
  ##
  enabled: true
  ## @param trivy.debugMode The flag to enable Trivy debug mode
  ##
  debugMode: false
  ## @param trivy.vulnType Comma-separated list of vulnerability types. Possible values `os` and `library`.
  ##
  vulnType: "os,library"
  ## @param trivy.severity Comma-separated list of severities to be checked
  ##
  severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
  ## @param trivy.ignoreUnfixed The flag to display only fixed vulnerabilities
  ##
  ignoreUnfixed: false
  ## @param trivy.insecure The flag to skip verifying registry certificate
  ##
  insecure: false
  ## @param trivy.existingEnvVarsSecret Existing secret for trivy
  ## The secret must contain the keys:
  ## `SCANNER_TRIVY_GITHUB_TOKEN` (optional)
  ## `SCANNER_REDIS_URL` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/5")
  ## `SCANNER_STORE_REDIS_URL` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/5")
  ## `SCANNER_JOB_QUEUE_REDIS_URL` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/5")
  ##
  existingEnvVarsSecret: ""
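  ## A minimal sketch of such a secret created outside the chart (the secret name is
  ## illustrative; values placed under stringData are base64-encoded automatically):
  ## apiVersion: v1
  ## kind: Secret
  ## metadata:
  ##   name: my-trivy-env
  ## stringData:
  ##   SCANNER_REDIS_URL: "redis://harbor-redis-master:6379/5"
  ##   SCANNER_STORE_REDIS_URL: "redis://harbor-redis-master:6379/5"
  ##   SCANNER_JOB_QUEUE_REDIS_URL: "redis://harbor-redis-master:6379/5"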
  ## @param trivy.gitHubToken The GitHub access token to download Trivy DB
  ##
  ## Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
  ## It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
  ## in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update
  ## timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one.
  ## Currently, the database is updated every 12 hours and published as a new release to GitHub.
  ##
  ## Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
  ## for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
  ## requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
  ## https://developer.github.com/v3/#rate-limiting
  ##
  ## You can create a GitHub token by following the instructions in
  ## https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
  ##
  gitHubToken: ""
  ## @param trivy.skipUpdate The flag to disable Trivy DB downloads from GitHub
  ## You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues.
  ## If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the
  ## `/bitnami/harbor-adapter-trivy/.cache/trivy/db/trivy.db` path.
  ## ref: https://trivy.dev/dev/docs/configuration/db/
  ##
  skipUpdate: false
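  ## When skipUpdate is true, one way to provide the pre-downloaded database is an extra volume,
  ## e.g. (volume name and claim are illustrative; see trivy.extraVolumes / trivy.extraVolumeMounts below):
  ## extraVolumes:
  ##   - name: trivy-db
  ##     persistentVolumeClaim:
  ##       claimName: trivy-db-offline
  ## extraVolumeMounts:
  ##   - name: trivy-db
  ##     mountPath: /bitnami/harbor-adapter-trivy/.cache/trivy/db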
  ## @param trivy.skipJavaDbUpdate The flag to disable Trivy JAVA DB downloads.
  ## You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues.
  ## If the value is set to `true` you have to manually download the `trivy-java.db` file and mount it in the
  ## `/bitnami/harbor-adapter-trivy/.cache/trivy/java-db/trivy-java.db` path.
  ##
  skipJavaDbUpdate: false
  ## @param trivy.dbRepository OCI repository(ies) to retrieve the Trivy vulnerability database from
  dbRepository: ""
  ## @param trivy.javaDbRepository OCI repository(ies) to retrieve the Java Trivy vulnerability database from
  javaDbRepository: ""
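  ## e.g. to pull the databases from a private mirror instead of the default public registry
  ## (hostname and paths are illustrative):
  ## dbRepository: "registry.example.com/mirrors/aquasecurity/trivy-db"
  ## javaDbRepository: "registry.example.com/mirrors/aquasecurity/trivy-java-db"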
  ## @param trivy.cacheDir Directory to store the cache
  ##
  cacheDir: "/bitnami/harbor-adapter-trivy/.cache"
  ## Use TLS in the container
  ##
  tls:
    ## @param trivy.tls.existingSecret Name of an existing secret with the certificates for internal TLS access
    ## Requires `internalTLS.enabled` to be set to `true`
    ## Self-signed TLS certificates will be used otherwise
    ##
    existingSecret: ""
  ## @param trivy.command Override default container command (useful when using custom images)
  ##
  command: []
  ## @param trivy.args Override default container args (useful when using custom images)
  ##
  args: []
  ## @param trivy.extraEnvVars Array with extra environment variables to add Trivy pods
  ##
  extraEnvVars: []
  ## @param trivy.extraEnvVarsCM ConfigMap containing extra environment variables for Trivy pods
  ##
  extraEnvVarsCM: ""
  ## @param trivy.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Trivy pods
  ##
  extraEnvVarsSecret: ""
  ## @param trivy.containerPorts.http Trivy HTTP container port
  ## @param trivy.containerPorts.https Trivy HTTPS container port
  ##
  containerPorts:
    http: 8080
    https: 8443
  ## @param trivy.replicaCount Number of Trivy replicas
  ##
  replicaCount: 1
  ## Configure extra options for Trivy containers' liveness, readiness and startup probes
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
  ## @param trivy.livenessProbe.enabled Enable livenessProbe on Trivy containers
  ## @param trivy.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
  ## @param trivy.livenessProbe.periodSeconds Period seconds for livenessProbe
  ## @param trivy.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
  ## @param trivy.livenessProbe.failureThreshold Failure threshold for livenessProbe
  ## @param trivy.livenessProbe.successThreshold Success threshold for livenessProbe
  ##
  livenessProbe:
    enabled: true
    initialDelaySeconds: 20
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## @param trivy.readinessProbe.enabled Enable readinessProbe on Trivy containers
  ## @param trivy.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
  ## @param trivy.readinessProbe.periodSeconds Period seconds for readinessProbe
  ## @param trivy.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
  ## @param trivy.readinessProbe.failureThreshold Failure threshold for readinessProbe
  ## @param trivy.readinessProbe.successThreshold Success threshold for readinessProbe
  ##
  readinessProbe:
    enabled: true
    initialDelaySeconds: 20
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## @param trivy.startupProbe.enabled Enable startupProbe on Trivy containers
  ## @param trivy.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
  ## @param trivy.startupProbe.periodSeconds Period seconds for startupProbe
  ## @param trivy.startupProbe.timeoutSeconds Timeout seconds for startupProbe
  ## @param trivy.startupProbe.failureThreshold Failure threshold for startupProbe
  ## @param trivy.startupProbe.successThreshold Success threshold for startupProbe
  ##
  startupProbe:
    enabled: false
    initialDelaySeconds: 5
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 15
    successThreshold: 1
  ## @param trivy.customLivenessProbe Custom livenessProbe that overrides the default one
  ##
  customLivenessProbe: {}
  ## @param trivy.customReadinessProbe Custom readinessProbe that overrides the default one
  ##
  customReadinessProbe: {}
  ## @param trivy.customStartupProbe Custom startupProbe that overrides the default one
  ##
  customStartupProbe: {}
  ## Trivy resource requests and limits
  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
  ## @param trivy.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if trivy.resources is set (trivy.resources is recommended for production).
  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
  ##
  resourcesPreset: "small"
  ## @param trivy.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
  ## Example:
  ## resources:
  ##   requests:
  ##     cpu: 2
  ##     memory: 512Mi
  ##   limits:
  ##     cpu: 3
  ##     memory: 1024Mi
  ##
  resources: {}
  ## Configure Trivy pods Security Context
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
  ## @param trivy.podSecurityContext.enabled Enabled Trivy pods' Security Context
  ## @param trivy.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
  ## @param trivy.podSecurityContext.sysctls Set kernel settings using the sysctl interface
  ## @param trivy.podSecurityContext.supplementalGroups Set filesystem extra groups
  ## @param trivy.podSecurityContext.fsGroup Set Trivy pod's Security Context fsGroup
  ##
  podSecurityContext:
    enabled: true
    fsGroupChangePolicy: Always
    sysctls: []
    supplementalGroups: []
    fsGroup: 1001
  ## Configure Trivy containers (only main one) Security Context
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
  ## @param trivy.containerSecurityContext.enabled Enabled containers' Security Context
  ## @param trivy.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
  ## @param trivy.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
  ## @param trivy.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
  ## @param trivy.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
  ## @param trivy.containerSecurityContext.privileged Set container's Security Context privileged
  ## @param trivy.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
  ## @param trivy.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
  ## @param trivy.containerSecurityContext.capabilities.drop List of capabilities to be dropped
  ## @param trivy.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
  ##
  containerSecurityContext:
    enabled: true
    seLinuxOptions: {}
    runAsUser: 1001
    runAsGroup: 1001
    runAsNonRoot: true
    privileged: false
    readOnlyRootFilesystem: true
    allowPrivilegeEscalation: false
    capabilities:
      drop: ["ALL"]
    seccompProfile:
      type: "RuntimeDefault"
  ## @param trivy.updateStrategy.type Trivy deployment strategy type - only really applicable for deployments with RWO PVs attached
  ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
  ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
  ## terminate the single previous pod, so that the new, incoming pod can attach to the PV
  ##
  updateStrategy:
    type: RollingUpdate
  ## @param trivy.lifecycleHooks LifecycleHook for the Trivy container(s) to automate configuration before or after startup
  ##
  lifecycleHooks: {}
  ## @param trivy.hostAliases Trivy pods host aliases
  ##
  hostAliases: []
  ## @param trivy.podLabels Add additional labels to the Trivy pods (evaluated as a template)
  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
  ##
  podLabels: {}
  ## @param trivy.podAnnotations Annotations to add to the Trivy pods (evaluated as a template)
  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
  ##
  podAnnotations: {}
  ## @param trivy.podAffinityPreset Trivy Pod affinity preset. Ignored if `trivy.affinity` is set. Allowed values: `soft` or `hard`
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAffinityPreset: ""
  ## @param trivy.podAntiAffinityPreset Trivy Pod anti-affinity preset. Ignored if `trivy.affinity` is set. Allowed values: `soft` or `hard`
  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAntiAffinityPreset: soft
  ## Node affinity preset
  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
  ##
  nodeAffinityPreset:
    ## @param trivy.nodeAffinityPreset.type Trivy Node affinity preset type. Ignored if `trivy.affinity` is set. Allowed values: `soft` or `hard`
    ##
    type: ""
    ## @param trivy.nodeAffinityPreset.key Trivy Node label key to match. Ignored if `trivy.affinity` is set.
    ## E.g.
    ## key: "kubernetes.io/e2e-az-name"
    ##
    key: ""
    ## @param trivy.nodeAffinityPreset.values Trivy Node label values to match. Ignored if `trivy.affinity` is set.
    ## E.g.
    ## values:
    ##   - e2e-az1
    ##   - e2e-az2
    ##
    values: []
  ## @param trivy.affinity Trivy Affinity for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## Note: trivy.podAffinityPreset, trivy.podAntiAffinityPreset, and trivy.nodeAffinityPreset will be ignored when it's set
  ##
  affinity: {}
  ## @param trivy.nodeSelector Trivy Node labels for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
  ##
  nodeSelector: {}
  ## @param trivy.tolerations Trivy Tolerations for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param trivy.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
  ##
  topologySpreadConstraints: []
  ## @param trivy.priorityClassName Priority Class Name
  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
  ##
  priorityClassName: ""
  ## @param trivy.schedulerName Use an alternate scheduler, e.g. "stork".
  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
  ##
  schedulerName: ""
  ## @param trivy.sidecars Add additional sidecar containers to the Trivy pods
  ## Example:
  ## sidecars:
  ##   - name: your-image-name
  ##     image: your-image
  ##     imagePullPolicy: Always
  ##     ports:
  ##       - name: portname
  ##         containerPort: 1234
  ##
  sidecars: []
  ## @param trivy.initContainers Add additional init containers to the Trivy pods
  ## Example:
  ## initContainers:
  ##   - name: your-image-name
  ##     image: your-image
  ##     imagePullPolicy: Always
  ##     ports:
  ##       - name: portname
  ##         containerPort: 1234
  ##
  initContainers: []
  ## Pod Disruption Budget configuration
  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
  ## @param trivy.pdb.create Enable/disable a Pod Disruption Budget creation
  ## @param trivy.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
  ## @param trivy.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `trivy.pdb.minAvailable` and `trivy.pdb.maxUnavailable` are empty.
  ##
  pdb:
    create: true
    minAvailable: ""
    maxUnavailable: ""
  ## @param trivy.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Trivy pods
  ##
  extraVolumeMounts: []
  ## @param trivy.extraVolumes Optionally specify extra list of additional volumes for the Trivy pods
  ##
  extraVolumes: []
  ## @param trivy.automountServiceAccountToken Mount Service Account token in pod
  ##
  automountServiceAccountToken: false
  ## Trivy ServiceAccount configuration
  ##
  serviceAccount:
    ## @param trivy.serviceAccount.create Specifies whether a ServiceAccount should be created
    ##
    create: false
    ## @param trivy.serviceAccount.name The name of the ServiceAccount to use.
    ## If not set and create is true, a name is generated using the common.names.fullname template
    ##
    name: ""
    ## @param trivy.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
    ##
    automountServiceAccountToken: false
    ## @param trivy.serviceAccount.annotations Additional custom annotations for the ServiceAccount
    ##
    annotations: {}
  ## Trivy service configuration
  ##
  service:
    ## @param trivy.service.ports.http Trivy HTTP service port
    ## @param trivy.service.ports.https Trivy HTTPS service port
    ##
    ports:
      http: 8080
      https: 8443
  ## Network Policies
  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
  ##
  networkPolicy:
    ## @param trivy.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
    ##
    enabled: true
    ## @param trivy.networkPolicy.allowExternal Don't require server label for connections
    ## The Policy model to apply. When set to false, only pods with the correct
    ## server label will have network access to the ports server is listening
    ## on. When true, server will accept connections from any source
    ## (with the correct destination port).
    ##
    allowExternal: true
    ## @param trivy.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
    ##
    allowExternalEgress: true
    ## @param trivy.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
    ## e.g:
    ## extraIngress:
    ##   - ports:
    ##       - port: 1234
    ##     from:
    ##       - podSelector:
    ##           - matchLabels:
    ##               - role: frontend
    ##       - podSelector:
    ##           - matchExpressions:
    ##               - key: role
    ##                 operator: In
    ##                 values:
    ##                   - frontend
    extraIngress: []
    ## @param trivy.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
    ## e.g:
    ## extraEgress:
    ##   - ports:
    ##       - port: 1234
    ##     to:
    ##       - podSelector:
    ##           - matchLabels:
    ##               - role: frontend
    ##       - podSelector:
    ##           - matchExpressions:
    ##               - key: role
    ##                 operator: In
    ##                 values:
    ##                   - frontend
    ##
    extraEgress: []
    ## @param trivy.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
    ## @param trivy.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
    ##
    ingressNSMatchLabels: {}
    ingressNSPodMatchLabels: {}
## @section Harbor Exporter Parameters
##
exporter:
  ## Bitnami Harbor Exporter image
  ## ref: https://hub.docker.com/r/bitnami/harbor-exporter/tags/
  ## @param exporter.image.registry [default: REGISTRY_NAME] Harbor Exporter image registry
  ## @param exporter.image.repository [default: REPOSITORY_NAME/harbor-exporter] Harbor Exporter image repository
  ## @skip exporter.image.tag Harbor Exporter image tag
  ## @param exporter.image.digest Harbor Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
  ## @param exporter.image.pullPolicy Harbor exporter image pull policy
  ## @param exporter.image.pullSecrets Specify docker-registry secret names as an array
  ## @param exporter.image.debug Specify if debug logs should be enabled
  ##
  image:
    registry: docker.io
    repository: bitnami/harbor-exporter
    tag: 2.12.2-debian-12-r1
    digest: ""
    ## Specify an imagePullPolicy
    ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
    ##
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ## e.g:
    ## pullSecrets:
    ##   - myRegistryKeySecretName
    ##
    pullSecrets: []
    ## Enable debug mode
    ##
    debug: false
  ## @param exporter.command Override default container command (useful when using custom images)
  ##
  command: []
  ## @param exporter.args Override default container args (useful when using custom images)
  ##
  args: []
  ## @param exporter.extraEnvVars Array containing extra env vars
  ## For example:
  ##  - name: HARBOR_DATABASE_SSLMODE
  ##    value: verify-ca
  ##
  extraEnvVars: []
  ## @param exporter.extraEnvVarsCM ConfigMap containing extra env vars
  ##
  extraEnvVarsCM: ""
  ## @param exporter.extraEnvVarsSecret Secret containing extra env vars (in case of sensitive data)
  ##
  extraEnvVarsSecret: ""
  ## @param exporter.containerPorts.metrics Harbor Exporter HTTP container port
  ##
  containerPorts:
    metrics: 8001
  ## @param exporter.replicaCount The replica count
  ##
  replicaCount: 1
  ## Harbor Exporter containers' liveness probe
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
  ## @param exporter.livenessProbe.enabled Enable livenessProbe
  ## @param exporter.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
  ## @param exporter.livenessProbe.periodSeconds Period seconds for livenessProbe
  ## @param exporter.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
  ## @param exporter.livenessProbe.failureThreshold Failure threshold for livenessProbe
  ## @param exporter.livenessProbe.successThreshold Success threshold for livenessProbe
  ##
  livenessProbe:
    enabled: true
    initialDelaySeconds: 20
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## Harbor Exporter containers' readiness probe
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
  ## @param exporter.readinessProbe.enabled Enable readinessProbe
  ## @param exporter.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
  ## @param exporter.readinessProbe.periodSeconds Period seconds for readinessProbe
  ## @param exporter.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
  ## @param exporter.readinessProbe.failureThreshold Failure threshold for readinessProbe
  ## @param exporter.readinessProbe.successThreshold Success threshold for readinessProbe
  ##
  readinessProbe:
    enabled: true
    initialDelaySeconds: 20
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## @param exporter.startupProbe.enabled Enable startupProbe on Harbor Exporter containers
  ## @param exporter.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
  ## @param exporter.startupProbe.periodSeconds Period seconds for startupProbe
  ## @param exporter.startupProbe.timeoutSeconds Timeout seconds for startupProbe
  ## @param exporter.startupProbe.failureThreshold Failure threshold for startupProbe
  ## @param exporter.startupProbe.successThreshold Success threshold for startupProbe
  ##
  startupProbe:
    enabled: false
    initialDelaySeconds: 5
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 15
    successThreshold: 1
  ## @param exporter.customLivenessProbe Custom livenessProbe that overrides the default one
  ##
  customLivenessProbe: {}
  ## @param exporter.customReadinessProbe Custom readinessProbe that overrides the default one
  ##
  customReadinessProbe: {}
  ## @param exporter.customStartupProbe Custom startupProbe that overrides the default one
  ##
  customStartupProbe: {}
  ## Harbor Exporter resource requests and limits
  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
  ## @param exporter.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if exporter.resources is set (exporter.resources is recommended for production).
  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
  ##
  resourcesPreset: "nano"
  ## @param exporter.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
  ## Example:
  ## resources:
  ##   requests:
  ##     cpu: 2
  ##     memory: 512Mi
  ##   limits:
  ##     cpu: 3
  ##     memory: 1024Mi
  ##
  resources: {}
  ## Configure Exporter pods Security Context
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
  ## @param exporter.podSecurityContext.enabled Enabled Exporter pods' Security Context
  ## @param exporter.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
  ## @param exporter.podSecurityContext.sysctls Set kernel settings using the sysctl interface
  ## @param exporter.podSecurityContext.supplementalGroups Set filesystem extra groups
  ## @param exporter.podSecurityContext.fsGroup Set Exporter pod's Security Context fsGroup
  ##
  podSecurityContext:
    enabled: true
    fsGroupChangePolicy: Always
    sysctls: []
    supplementalGroups: []
    fsGroup: 1001
  ## Configure Exporter containers (only main one) Security Context
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
  ## @param exporter.containerSecurityContext.enabled Enabled containers' Security Context
  ## @param exporter.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
  ## @param exporter.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
  ## @param exporter.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
  ## @param exporter.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
  ## @param exporter.containerSecurityContext.privileged Set container's Security Context privileged
  ## @param exporter.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
  ## @param exporter.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
  ## @param exporter.containerSecurityContext.capabilities.drop List of capabilities to be dropped
  ## @param exporter.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
  ##
  containerSecurityContext:
    enabled: true
    seLinuxOptions: {}
    runAsUser: 1001
    runAsGroup: 1001
    runAsNonRoot: true
    privileged: false
    readOnlyRootFilesystem: true
    allowPrivilegeEscalation: false
    capabilities:
      drop: ["ALL"]
    seccompProfile:
      type: "RuntimeDefault"
  ## @param exporter.updateStrategy.type The update strategy for deployments with persistent volumes: RollingUpdate or Recreate. Set it as Recreate when RWM for volumes isn't supported
  ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
  ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
  ## terminate the single previous pod, so that the new, incoming pod can attach to the PV
  ##
  updateStrategy:
    type: RollingUpdate
  ## @param exporter.lifecycleHooks LifecycleHook to set additional configuration at startup, e.g. LDAP settings via REST API. Evaluated as a template
  ##
  lifecycleHooks: {}
  ## @param exporter.hostAliases Exporter pods host aliases
  ##
  hostAliases: []
  ## @param exporter.podLabels Add additional labels to the pod (evaluated as a template)
  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
  ##
  podLabels: {}
  ## @param exporter.podAnnotations Annotations to add to the exporter pod
  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
  ##
  podAnnotations: {}
  ## @param exporter.podAffinityPreset Harbor Exporter Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAffinityPreset: ""
  ## @param exporter.podAntiAffinityPreset Harbor Exporter Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAntiAffinityPreset: soft
  ## Node affinity preset
  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
  ##
  nodeAffinityPreset:
    ## @param exporter.nodeAffinityPreset.type Harbor Exporter Node affinity preset type. Ignored if `exporter.affinity` is set. Allowed values: `soft` or `hard`
    ##
    type: ""
    ## @param exporter.nodeAffinityPreset.key Harbor Exporter Node label key to match. Ignored if `exporter.affinity` is set.
    ## E.g.
    ## key: "kubernetes.io/e2e-az-name"
    ##
    key: ""
    ## @param exporter.nodeAffinityPreset.values Harbor Exporter Node label values to match. Ignored if `exporter.affinity` is set.
    ## E.g.
    ## values:
    ##   - e2e-az1
    ##   - e2e-az2
    ##
    values: []
  ## @param exporter.affinity Harbor Exporter Affinity for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## Note: `exporter.podAffinityPreset`, `exporter.podAntiAffinityPreset`, and `exporter.nodeAffinityPreset` will be ignored when it's set
  ##
  affinity: {}
  ## @param exporter.priorityClassName Exporter pods Priority Class Name
  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
  ##
  priorityClassName: ""
  ## @param exporter.schedulerName Name of the k8s scheduler (other than default)
  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
  ##
  schedulerName: ""
  ## @param exporter.nodeSelector Harbor Exporter Node labels for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
  ##
  nodeSelector: {}
  ## @param exporter.tolerations Harbor Exporter Tolerations for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param exporter.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
  ##
  topologySpreadConstraints: []
  ## @param exporter.initContainers Add additional init containers to the pod (evaluated as a template)
  ##
  initContainers: []
  ## Pod Disruption Budget configuration
  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
  ## @param exporter.pdb.create Enable/disable a Pod Disruption Budget creation
  ## @param exporter.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
  ## @param exporter.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `exporter.pdb.minAvailable` and `exporter.pdb.maxUnavailable` are empty.
  ##
  pdb:
    create: true
    minAvailable: ""
    maxUnavailable: ""
  ## @param exporter.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Exporter pods
  ##
  extraVolumeMounts: []
  ## @param exporter.extraVolumes Optionally specify extra list of additional volumes for the Harbor Exporter pods
  ##
  extraVolumes: []
  ## @param exporter.sidecars Attach additional containers to the pod (evaluated as a template)
  ##
  sidecars: []
  ## @param exporter.automountServiceAccountToken Mount Service Account token in pod
  ##
  automountServiceAccountToken: false
  ## Harbor Exporter ServiceAccount configuration
  ##
  serviceAccount:
    ## @param exporter.serviceAccount.create Specifies whether a ServiceAccount should be created
    ##
    create: false
    ## @param exporter.serviceAccount.name The name of the ServiceAccount to use.
    ## If not set and create is true, a name is generated using the common.names.fullname template
    ##
    name: ""
    ## @param exporter.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
    ##
    automountServiceAccountToken: false
    ## @param exporter.serviceAccount.annotations Additional custom annotations for the ServiceAccount
    ##
    annotations: {}
  ## Exporter service configuration
  ##
  service:
    ## @param exporter.service.ports.metrics Exporter HTTP service port
    ##
    ports:
      metrics: 8001
  ## Network Policies
  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
  ##
  networkPolicy:
    ## @param exporter.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
    ##
    enabled: true
    ## @param exporter.networkPolicy.allowExternal Don't require server label for connections
    ## The Policy model to apply. When set to false, only pods with the correct
    ## server label will have network access to the ports server is listening
    ## on. When true, server will accept connections from any source
    ## (with the correct destination port).
    ##
    allowExternal: true
    ## @param exporter.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
    ##
    allowExternalEgress: true
    ## @param exporter.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
    ## e.g:
    ## extraIngress:
    ##   - ports:
    ##       - port: 1234
    ##     from:
    ##       - podSelector:
    ##           - matchLabels:
    ##               - role: frontend
    ##       - podSelector:
    ##           - matchExpressions:
    ##               - key: role
    ##                 operator: In
    ##                 values:
    ##                   - frontend
    extraIngress: []
    ## @param exporter.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
    ## e.g:
    ## extraEgress:
    ##   - ports:
    ##       - port: 1234
    ##     to:
    ##       - podSelector:
    ##           - matchLabels:
    ##               - role: frontend
    ##       - podSelector:
    ##           - matchExpressions:
    ##               - key: role
    ##                 operator: In
    ##                 values:
    ##                   - frontend
    ##
    extraEgress: []
    ## @param exporter.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
## @param exporter.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
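    ## e.g, to allow scraping from a Prometheus running in another namespace (the namespace and pod labels are placeholders):
    ## ingressNSMatchLabels:
    ##   kubernetes.io/metadata.name: monitoring
    ## ingressNSPodMatchLabels:
    ##   app.kubernetes.io/name: prometheus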
    ##
    ingressNSMatchLabels: {}
    ingressNSPodMatchLabels: {}
## @section PostgreSQL Parameters
##

## PostgreSQL chart configuration
## ref: https://github.com/bitnami/charts/blob/main/bitnami/postgresql/values.yaml
## @param postgresql.enabled Switch to enable or disable the PostgreSQL helm chart
## @param postgresql.auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user
## @param postgresql.auth.postgresPassword Password for the "postgres" admin user
## @param postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials
## @param postgresql.architecture PostgreSQL architecture (`standalone` or `replication`)
## @param postgresql.primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration)
## @param postgresql.primary.initdb.scripts [object] Initdb scripts to create Harbor databases
##
postgresql:
  enabled: false
  ## Override PostgreSQL default image as 14.x is not supported https://goharbor.io/docs/2.4.0/install-config/
  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql
  ## @param postgresql.image.registry [default: REGISTRY_NAME] PostgreSQL image registry
  ## @param postgresql.image.repository [default: REPOSITORY_NAME/postgresql] PostgreSQL image repository
  ## @skip postgresql.image.tag PostgreSQL image tag (immutable tags are recommended)
  ## @param postgresql.image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
  ##
  image:
    registry: docker.io
    repository: bitnami/postgresql
    tag: 14.15.0-debian-12-r8
    digest: ""
  auth:
    enablePostgresUser: true
    postgresPassword: not-secure-database-password
    existingSecret: ""
  architecture: standalone
  primary:
    extendedConfiguration: |
      max_connections = 1024
    initdb:
      scripts:
        initial-registry.sql: |
          CREATE DATABASE registry ENCODING 'UTF8';
          \c registry;
          CREATE TABLE schema_migrations(version bigint not null primary key, dirty boolean not null);
    ## PostgreSQL Primary resource requests and limits
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
    ## @param postgresql.primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production).
    ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
    ##
    resourcesPreset: "nano"
    ## @param postgresql.primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
    ## Example:
    ## resources:
    ##   requests:
    ##     cpu: 2
    ##     memory: 512Mi
    ##   limits:
    ##     cpu: 3
    ##     memory: 1024Mi
    ##
    resources: {}
## External PostgreSQL configuration
## All of these values are only used when postgresql.enabled is set to false
## @param externalDatabase.host Database host
## @param externalDatabase.port Database port number
## @param externalDatabase.user Non-root username for Harbor
## @param externalDatabase.password Password for the non-root username for Harbor
## @param externalDatabase.sslmode External database ssl mode
## @param externalDatabase.coreDatabase External database name for core
## @param externalDatabase.existingSecret The name of an existing secret with database credentials
## @param externalDatabase.existingSecretPasswordKey Password key on the existing secret
##
externalDatabase:
  host: postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local
  port: 5432
  user: {{ .Values.globals.harbor.postgres.username }}
  password: {{ .Values.globals.harbor.postgres.password }}
  sslmode: disable
  coreDatabase: {{ .Values.globals.harbor.postgres.database }}
  existingSecret: ""
  existingSecretPasswordKey: "db-password"

## @section Redis® parameters
##

## Redis® chart configuration
## ref: https://github.com/bitnami/charts/blob/main/bitnami/redis/values.yaml
## @param redis.enabled Switch to enable or disable the Redis® helm chart
## @param redis.auth.enabled Enable password authentication
## @param redis.auth.password Redis® password
## @param redis.auth.existingSecret The name of an existing secret with Redis® credentials
## @param redis.architecture Redis® architecture. Allowed values: `standalone` or `replication`
## @param redis.sentinel.enabled Use Redis® Sentinel on Redis® pods.
## @param redis.sentinel.masterSet Master set name
## @param redis.sentinel.service.ports.sentinel Redis® service port for Redis® Sentinel
##
redis:
  enabled: false
  auth:
    enabled: false
    ## Redis® password (both master and slave). Defaults to a random 10-character alphanumeric string if not set and auth.enabled is true.
    ## It should always be set using the password value or in the existingSecret to avoid issues
    ## with Harbor.
    ## The password value is ignored if existingSecret is set
    ##
    password: ""
    existingSecret: ""
  architecture: standalone
  sentinel:
    enabled: false
    masterSet: mymaster
    service:
      ports:
        sentinel: 26379
  master:
    ## Redis® master resource requests and limits
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
    ## @param redis.master.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if master.resources is set (master.resources is recommended for production).
    ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
    ##
    resourcesPreset: "nano"
    ## @param redis.master.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
    ## Example:
    ## resources:
    ##   requests:
    ##     cpu: 2
    ##     memory: 512Mi
    ##   limits:
    ##     cpu: 3
    ##     memory: 1024Mi
    ##
    resources: {}
## External Redis® configuration
## All of these values are only used when redis.enabled is set to false
## @param externalRedis.host Redis® host
## @param externalRedis.port Redis® port number
## @param externalRedis.password Redis® password
## @param externalRedis.coreDatabaseIndex Index for core database
## @param externalRedis.jobserviceDatabaseIndex Index for jobservice database
## @param externalRedis.registryDatabaseIndex Index for registry database
## @param externalRedis.trivyAdapterDatabaseIndex Index for trivy adapter database
##
externalRedis:
  host: redis-master.{{ .Values.globals.redis.namespace }}.svc.cluster.local
  port: 6379
  password: {{ .Values.globals.redis.password }}
  coreDatabaseIndex: "0"
  jobserviceDatabaseIndex: "1"
  registryDatabaseIndex: "2"
  trivyAdapterDatabaseIndex: "5"
  ## Redis® sentinel configuration
  ## @param externalRedis.sentinel.enabled If an external Redis® with Sentinel is used, set it to `true`
  ## @param externalRedis.sentinel.masterSet Name of sentinel masterSet if sentinel is used
  ## @param externalRedis.sentinel.hosts Sentinel hosts and ports in the format `<host1>:<port1>,<host2>:<port2>`
##
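  ## e.g (illustrative; the Sentinel hostnames and ports are placeholders):
  ## sentinel:
  ##   enabled: true
  ##   masterSet: "mymaster"
  ##   hosts: "sentinel-0.redis.svc.cluster.local:26379,sentinel-1.redis.svc.cluster.local:26379"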
  sentinel:
    enabled: false
    masterSet: "mymaster"
    hosts: ""
## @section Harbor metrics parameters
##
metrics:
  ## @param metrics.enabled Whether or not to enable metrics for the different Harbor components
  ##
  enabled: true
  ## @param metrics.path Path where metrics are exposed
  ##
  path: /metrics
  ## Prometheus Operator ServiceMonitor configuration
  ##
  serviceMonitor:
    ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.enabled` to be `true`)
    ##
    enabled: false
    ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
    ##
    namespace: ""
    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    interval: ""
    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    scrapeTimeout: ""
    ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
    ##
    labels: {}
    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
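    ## e.g (illustrative; use the label your Prometheus instance is configured to select ServiceMonitors with):
    ## selector:
    ##   prometheus: my-prometheus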
    ##
    selector: {}
## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
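    ## e.g, to record the scraped pod's node name in a "node" label (illustrative relabel rule):
    ## relabelings:
    ##   - sourceLabels: [__meta_kubernetes_pod_node_name]
    ##     targetLabel: node
    ##     action: replace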
    ##
    relabelings: []
## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
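    ## e.g, to drop Go runtime metrics and reduce series cardinality (illustrative):
    ## metricRelabelings:
    ##   - sourceLabels: [__name__]
    ##     regex: 'go_.*'
    ##     action: drop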
    ##
    metricRelabelings: []
    ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add to the scrape endpoint
    ##
    honorLabels: false
    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus.
    ##
    jobLabel: ""