infra

commit b563d4a9cb

Changed files:

.gitignore
README.md
clusters/dolo/flux-system/
data/ssh/
proxmox/
├── ansible/
│   ├── .gitignore
│   ├── ansible.cfg
│   ├── collections/
│   ├── dns.yml
│   ├── inventory/ (dolo, folly, full, stingray)
│   ├── lvm.yml
│   ├── reset.yml
│   ├── roles/ (dns-server, docker-swarm/tasks, gluster/tasks, swarm-bootstrap)
│   ├── site.yml
│   └── swarm.yml
├── docker/stacks/
├── k8s/
│   ├── helmfile.d/
│   │   ├── 00-core.yaml
│   │   ├── 01-databases.yaml
│   │   ├── 02-applications.yaml
│   │   ├── charts/
│   │   └── values/ (argo-cd, authentik, cert-manager, certs, ghost, gitea, gitlab, globals, harbor, init-dbs, kube-prometheus-stack, longhorn, mysql, nfs-subdir-external-provisioner, pgadmin4, phpmyadmin, postgres, rancher, redis, traefik, uptime-kuma)
│   └── manifests/dns/
└── tf/

.gitignore (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
.env
venv

README.md (new file, 134 lines)
@@ -0,0 +1,134 @@
# My Homelab Setup

## Getting started

### Dependencies

Install dependencies (Arch; `jq` is also needed for the inventory snippets
further down):

```sh
pacman -Sy opentofu kubectl helm helmfile python jq
```

### Proxmox

We first need to create a Proxmox user for Terraform to act on behalf of, along
with an API token for that user.

```sh
# Create the user
pveum user add terraform@pve

# Create a role for the user above
pveum role add Terraform -privs "Datastore.Allocate Datastore.AllocateSpace Datastore.AllocateTemplate Datastore.Audit Pool.Allocate Sys.Audit Sys.Console Sys.Modify SDN.Use VM.Allocate VM.Audit VM.Clone VM.Config.CDROM VM.Config.Cloudinit VM.Config.CPU VM.Config.Disk VM.Config.HWType VM.Config.Memory VM.Config.Network VM.Config.Options VM.Migrate VM.Monitor VM.PowerMgmt User.Modify Pool.Audit"

# Assign the terraform user to the above role
pveum aclmod / -user terraform@pve -role Terraform

# Create the token and save it for later
pveum user token add terraform@pve provider --privsep=0
```

### Provisioning with OpenTofu/Terraform

Create a file `proxmox/tf/credentials.auto.tfvars` with the following content,
replacing the placeholders as necessary:

```
proxmox_api_endpoint = "https://<domain or ip>"
proxmox_api_token = "terraform@pve!provider=<token from last step>"
```

Customize the other variables in `proxmox/tf/vars.auto.tfvars` and double-check
the configuration.

When ready, run `tofu apply` (the OpenTofu binary is named `tofu`). The command
might fail the first time when provisioning from scratch, but it seems to be
fine when run a second time.
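
For reference, the full loop looks something like this (assuming the project
lives in `proxmox/tf`):

```sh
cd proxmox/tf
tofu init    # fetch the Proxmox provider
tofu plan    # review what will be created
tofu apply   # provision; re-run once if the first apply fails
```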

### Creating a Docker swarm

The Docker swarm acts as a launchpad for the rest of the infrastructure. It
bootstraps a Portainer, Traefik, and Gitea deployment so that the remaining
configuration can be done through Portainer and Git.

```sh
# Run from proxmox/ansible (paths below are relative to it)
# Add SSH keys to known_hosts
ansible-inventory -i inventory/dolo --list |\
  jq -r '._meta.hostvars | keys[]' |\
  grep 'stingray' |\
  while read -r line; do
    ssh-keygen -R "$line"
    ssh-keyscan -H "$line" >> ~/.ssh/known_hosts
  done

# Initialize swarm
ansible-playbook -i inventory/stingray swarm.yml
```
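
To sanity-check the swarm afterwards, list the nodes from any manager (the
hostname below is a placeholder):

```sh
ssh ubuntu@<manager-node> docker node ls
```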

Traefik will be listening on the following hosts:

- git.mnke.org
- git.stingray.mnke.org
- portainer.stingray.mnke.org

Set DNS records or edit your hosts file to point those domains to a swarm node.
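
For example, a hosts-file entry pointing at a swarm node might look like this
(the IP is a placeholder):

```
# /etc/hosts
10.0.185.10 git.mnke.org git.stingray.mnke.org portainer.stingray.mnke.org
```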

### Creating a k3s cluster

Set up Ansible:

```sh
# Tested on Python 3.13.1
python3 -m venv venv
source venv/bin/activate
pip install -r requirements.txt
ansible-galaxy collection install -r proxmox/ansible/collections/requirements.yml
```

Set up the k3s cluster:

```sh
# Necessary because the hosts.yml file contains a relative path to the
# terraform project directory
cd proxmox/ansible

# Remove/scan keys
ansible-inventory -i inventory/dolo --list |\
  jq -r '._meta.hostvars | keys[]' |\
  while read -r line; do
    ssh-keygen -R "$line"
    ssh-keyscan -H "$line" >> ~/.ssh/known_hosts
  done

ansible-playbook lvm.yml site.yml -i inventory/dolo

# You should be left with a kubeconfig. Move it to ~/.kube/config. If you
# already have a ~/.kube/config file, make sure to back it up first.
mv kubeconfig ~/.kube/config

# Verify that you can connect to the cluster
kubectl get nodes

# Back to the repo root directory
cd -

# Verify deployment and service
kubectl apply -f proxmox/k8s/examples/001-example.yml

# This should succeed, and an IP should have been allocated by metallb.
# Check with the following command (assuming the example's service is
# named nginx):
kubectl describe service nginx

# Now try checking that the deployment works:
curl http://[allocated-ip]
```

### Install Helm charts

```sh
kubectl create secret generic regcred \
  --from-file=.dockerconfigjson=$HOME/.docker/config.json \
  --type=kubernetes.io/dockerconfigjson

# Assuming from the repo root (the helmfiles live in proxmox/k8s/helmfile.d)
cd proxmox/k8s
helmfile sync -f helmfile.d
```

## Credits

- Some inspiration and guidance were taken from [Andreas Marqvardsen's blog post](https://blog.andreasm.io/2024/01/15/proxmox-with-opentofu-kubespray-and-kubernetes)
- An automated setup of a k3s cluster from [Techno Tim's Ansible roles](https://github.com/techno-tim/k3s-ansible)
- Inspiration for a minimal Docker swarm from [nmarus](https://github.com/nmarus/docker-swarm-ansible/tree/master)

clusters/dolo/flux-system/gotk-components.yaml (new file, 12507 lines)
File diff suppressed because it is too large.

clusters/dolo/flux-system/gotk-sync.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
# This manifest was generated by flux. DO NOT EDIT.
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: flux-system
  namespace: flux-system
spec:
  interval: 1m0s
  ref:
    branch: master
  secretRef:
    name: flux-system
  url: https://git.mnke.org/tony/homelab.git
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: flux-system
  namespace: flux-system
spec:
  interval: 10m0s
  path: ./clusters/dolo
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system

clusters/dolo/flux-system/kustomization.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - gotk-components.yaml
  - gotk-sync.yaml
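
These three files are what Flux generates at bootstrap time; a sketch of the
bootstrap invocation that would produce this sync configuration (authentication
flags omitted):

```sh
flux bootstrap git \
  --url=https://git.mnke.org/tony/homelab.git \
  --branch=master \
  --path=clusters/dolo
```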

data/ssh/id_rsa.pub (new file, 1 line)
@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDc9zu9e7Pabd214TfV0drG7Bw2B1RYSNvcFVrbTlrLR0JX2vZOA9SdvE2VmDABNh8ETddgNRYmDudooqHVvOAtWLKD3O7uPsjfq9pL9OxgYhe/0posS4v8/KL7d7eSWAGUNWpQRB2wSpwf5tJbGAMNmsAaES+6ePJE7EcPhXB6YaJarr3JiJsy7yy6yMMGy9lxUU9rpfi9MJRFUEpklLakWuhrUqQdzmIXigDAhiy2RSKhD4JzwKdEmWPhTjTnMltpa3EXiHIJ+3CHsx0MY4yiG/JYqRZ93shpqFzHw6TwPDTB3GgrQm68TK8Cf05Wl2QPdmcZvd0lbZOZnu4pkpZXXlywp35rKMPS9AsQ+/H+ut9Y0DBRbvClHFDupIHtNOsF1UXcabszfhgou/Uz77ZNlsgJVh0klKzh1Z2FWEGlU9i1TJs4H4OMBALQVXPpEz5vL6fYW3Iw30WUGciF4EZecJEu7bz6pN/RK6F57hjcjK0hi31+BR7ktCku2irB0Ds= tony@titanium

proxmox/ansible/.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
kubeconfig

proxmox/ansible/ansible.cfg (new file, 712 lines)
File diff suppressed because it is too large. (It appears to be the stock
generated Ansible config template: every setting in it is commented out, so the
file carries no active configuration.)

proxmox/ansible/collections/requirements.yml (new file, 10 lines)
@@ -0,0 +1,10 @@
collections:
  - name: ansible.utils
  - name: community.general
  - name: community.docker
  - name: ansible.posix
  - name: kubernetes.core
  - name: cloud.terraform
  - name: https://github.com/techno-tim/k3s-ansible.git
    type: git
    version: master

proxmox/ansible/dns.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
- name: Set up DNS server
  hosts: dns_server
  remote_user: ubuntu
  become: true
  roles: [dns-server]
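
A sketch of how this playbook might be run; which inventory defines the
`dns_server` group is an assumption here:

```sh
cd proxmox/ansible
ansible-playbook dns.yml -i inventory/full
```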

proxmox/ansible/inventory/dolo/01-hosts.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
---
plugin: "cloud.terraform.terraform_provider"
# defaults to terraform, but we're using tofu
binary_path: "/usr/bin/tofu"
project_path: "../tf"
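
To see which hosts this plugin pulls out of the OpenTofu state, run the
inventory through `ansible-inventory`, e.g.:

```sh
# from proxmox/ansible
ansible-inventory -i inventory/dolo --graph
```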

proxmox/ansible/inventory/dolo/02-hosts.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
plugin: constructed
strict: true

groups:
  # Pick out only the dolo nodes (a host must be in both the dolo_* group and dolo)
  master: group_names | intersect(['dolo_master', 'dolo']) | length == 2
  node: group_names | intersect(['dolo_node', 'dolo']) | length == 2
  k3s_cluster: group_names | intersect(['dolo_k3s_cluster', 'dolo']) | length == 2
  lvm: group_names | intersect(['dolo_storage', 'dolo']) | length == 2

proxmox/ansible/inventory/dolo/group_vars/all/.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
secrets.yml

proxmox/ansible/inventory/dolo/group_vars/all/main.yml (new file)
@@ -0,0 +1,135 @@
---
cluster_name: dolo

k3s_version: v1.30.2+k3s2
# this is the user that has ssh access to these machines
ansible_user: ubuntu
systemd_dir: /etc/systemd/system

lvm:
  pv_disks:
    - /dev/sdb
  vg_name: vg1
  lv_name: pvs
  lv_size: +100%FREE
  fs_type: ext4
  mount_path: /mnt/lvm-pvs

# Set your timezone
system_timezone: America/Vancouver

# interface which will be used for flannel
flannel_iface: eth0

# if using calico or cilium, you may specify the cluster pod cidr pool
cluster_cidr: 10.52.0.0/16

# enable cilium bgp control plane for lb services and pod cidrs. disables metallb.
cilium_bgp: false

# enable kube-vip ARP broadcasts
kube_vip_arp: true

# apiserver_endpoint is the virtual ip-address which will be configured on each master
apiserver_endpoint: 10.0.185.1

# k3s_token is required so that masters can talk together securely
# this token should be alphanumeric only
k3s_token: "{{ secrets.k3s_token }}"

# The IP on which the node is reachable in the cluster.
# Here, a sensible default is provided; you can still override
# it for each of your hosts, though.
k3s_node_ip: "{{ ansible_facts[flannel_iface]['ipv4']['address'] }}"

# Disable the taint manually by setting: k3s_master_taint = false
k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"

# these arguments are recommended for servers as well as agents:
extra_args: >-
  {{ '--flannel-iface=' + flannel_iface if calico_iface is not defined and cilium_iface is not defined else '' }}
  --node-ip={{ k3s_node_ip }}

# change these to your liking; the only required ones are --disable servicelb and --tls-san {{ apiserver_endpoint }}
# the contents of the if block are also required if using calico or cilium
extra_server_args: >-
  {{ extra_args }}
  {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
  {% if calico_iface is defined or cilium_iface is defined %}
  --flannel-backend=none
  --disable-network-policy
  --cluster-cidr={{ cluster_cidr | default('10.52.0.0/16') }}
  {% endif %}
  --tls-san {{ apiserver_endpoint }}
  --disable servicelb
  --disable traefik

extra_agent_args: >-
  {{ extra_args }}

# image tag for kube-vip
kube_vip_tag_version: v0.8.2

# metallb type frr or native
metal_lb_type: native

# metallb mode layer2 or bgp
metal_lb_mode: layer2

# image tag for metal lb
metal_lb_speaker_tag_version: v0.14.8
metal_lb_controller_tag_version: v0.14.8

# metallb ip range for load balancer
metal_lb_ip_range: 10.0.185.128-10.0.185.136

# Only enable if your nodes are proxmox LXC nodes, make sure to configure your proxmox nodes
# in your hosts.ini file.
# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
# Most notably, your containers must be privileged, and must not have nesting set to true.
# Please note this script disables most of the security of lxc containers, with the trade-off being that lxc
# containers are significantly more resource efficient compared to full VMs.
# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
# I would only really recommend using this if you have particularly low powered proxmox nodes where the overhead of
# VMs would use a significant portion of your available resources.
proxmox_lxc_configure: false

# Only enable this if you have set up your own container registry to act as a mirror / pull-through cache
# (harbor / nexus / docker's official registry / etc).
# Can be beneficial for larger dev/test environments (for example if you're getting rate limited by docker hub),
# or air-gapped environments where your nodes don't have internet access after the initial setup
# (which is still needed for downloading the k3s binary and such).
# k3s's documentation about private registries here: https://docs.k3s.io/installation/private-registry
custom_registries: false
# The registries can be authenticated or anonymous, depending on your registry server configuration.
# If they allow anonymous access, simply remove the following bit from custom_registries_yaml
#   configs:
#     "registry.domain.com":
#       auth:
#         username: yourusername
#         password: yourpassword
# The following is an example that pulls all images used in this playbook through your private registries.
# It also allows you to pull your own images from your private registry, without having to use imagePullSecrets
# in your deployments.
# If all you need is your own images and you don't care about caching the docker/quay/ghcr.io images,
# you can just remove those from the mirrors: section.
custom_registries_yaml: |
  mirrors:
    docker.io:
      endpoint:
        - "https://registry.domain.com/v2/dockerhub"
    quay.io:
      endpoint:
        - "https://registry.domain.com/v2/quayio"
    ghcr.io:
      endpoint:
        - "https://registry.domain.com/v2/ghcrio"
    registry.domain.com:
      endpoint:
        - "https://registry.domain.com"

  configs:
    "registry.domain.com":
      auth:
        username: yourusername
        password: yourpassword
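The `secrets.k3s_token` reference above resolves from the git-ignored `secrets.yml` in the same directory. A minimal sketch of creating it; the key layout is inferred from the references above and the token value is just an example:

```sh
# Write an alphanumeric token into the git-ignored secrets file
cat > inventory/dolo/group_vars/all/secrets.yml <<EOF
secrets:
  k3s_token: $(openssl rand -hex 32)
EOF
```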

proxmox/ansible/inventory/folly/01-hosts.yml (new file)
@@ -0,0 +1,5 @@
---
plugin: "cloud.terraform.terraform_provider"
# defaults to terraform, but we're using tofu
binary_path: "/usr/bin/tofu"
project_path: "../tf"

proxmox/ansible/inventory/folly/02-hosts.yml (new file)
@@ -0,0 +1,8 @@
plugin: constructed
strict: true

groups:
  master: group_names | intersect(['folly_master', 'folly']) | length == 2
  node: group_names | intersect(['folly_node', 'folly']) | length == 2
  k3s_cluster: group_names | intersect(['folly_k3s_cluster', 'folly']) | length == 2
  lvm: group_names | intersect(['folly_storage', 'folly']) | length == 2

proxmox/ansible/inventory/folly/group_vars/all/.gitignore (new file)
@@ -0,0 +1 @@
secrets.yml

proxmox/ansible/inventory/folly/group_vars/all/main.yml (new file)
@@ -0,0 +1,135 @@
---
cluster_name: folly

lvm:
  pv_disks:
    - /dev/sdb
  vg_name: vg1
  lv_name: pvs
  lv_size: +100%FREE
  fs_type: ext4
  mount_path: /mnt/lvm-pvs

k3s_version: v1.30.2+k3s2
# this is the user that has ssh access to these machines
ansible_user: ubuntu
systemd_dir: /etc/systemd/system

# Set your timezone
system_timezone: America/Vancouver

# interface which will be used for flannel
flannel_iface: eth0

# if using calico or cilium, you may specify the cluster pod cidr pool
cluster_cidr: 10.52.0.0/16

# enable cilium bgp control plane for lb services and pod cidrs. disables metallb.
cilium_bgp: false

# enable kube-vip ARP broadcasts
kube_vip_arp: true

# apiserver_endpoint is the virtual ip-address which will be configured on each master
apiserver_endpoint: 10.0.186.1

# k3s_token is required so that masters can talk together securely
# this token should be alphanumeric only
k3s_token: "{{ secrets.k3s_token }}"

# The IP on which the node is reachable in the cluster.
# Here, a sensible default is provided; you can still override
# it for each of your hosts, though.
k3s_node_ip: "{{ ansible_facts[flannel_iface]['ipv4']['address'] }}"

# Disable the taint manually by setting: k3s_master_taint = false
k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"

# these arguments are recommended for servers as well as agents:
extra_args: >-
  {{ '--flannel-iface=' + flannel_iface if calico_iface is not defined and cilium_iface is not defined else '' }}
  --node-ip={{ k3s_node_ip }}

# change these to your liking; the only required ones are --disable servicelb and --tls-san {{ apiserver_endpoint }}
# the contents of the if block are also required if using calico or cilium
extra_server_args: >-
  {{ extra_args }}
  {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
  {% if calico_iface is defined or cilium_iface is defined %}
  --flannel-backend=none
  --disable-network-policy
  --cluster-cidr={{ cluster_cidr | default('10.52.0.0/16') }}
  {% endif %}
  --tls-san {{ apiserver_endpoint }}
  --disable servicelb
  --disable traefik

extra_agent_args: >-
  {{ extra_args }}

# image tag for kube-vip
kube_vip_tag_version: v0.8.2

# metallb type frr or native
metal_lb_type: native

# metallb mode layer2 or bgp
metal_lb_mode: layer2

# image tag for metal lb
metal_lb_speaker_tag_version: v0.14.8
metal_lb_controller_tag_version: v0.14.8

# metallb ip range for load balancer
metal_lb_ip_range: 10.0.186.128-10.0.186.136

# Only enable if your nodes are proxmox LXC nodes, make sure to configure your proxmox nodes
# in your hosts.ini file.
# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
# Most notably, your containers must be privileged, and must not have nesting set to true.
# Please note this script disables most of the security of lxc containers, with the trade-off being that lxc
# containers are significantly more resource efficient compared to full VMs.
# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
# I would only really recommend using this if you have particularly low powered proxmox nodes where the overhead of
# VMs would use a significant portion of your available resources.
proxmox_lxc_configure: false

# Only enable this if you have set up your own container registry to act as a mirror / pull-through cache
# (harbor / nexus / docker's official registry / etc).
# Can be beneficial for larger dev/test environments (for example if you're getting rate limited by docker hub),
# or air-gapped environments where your nodes don't have internet access after the initial setup
# (which is still needed for downloading the k3s binary and such).
# k3s's documentation about private registries here: https://docs.k3s.io/installation/private-registry
custom_registries: false
# The registries can be authenticated or anonymous, depending on your registry server configuration.
# If they allow anonymous access, simply remove the following bit from custom_registries_yaml
#   configs:
#     "registry.domain.com":
#       auth:
#         username: yourusername
#         password: yourpassword
# The following is an example that pulls all images used in this playbook through your private registries.
# It also allows you to pull your own images from your private registry, without having to use imagePullSecrets
# in your deployments.
# If all you need is your own images and you don't care about caching the docker/quay/ghcr.io images,
# you can just remove those from the mirrors: section.
custom_registries_yaml: |
  mirrors:
    docker.io:
      endpoint:
        - "https://registry.domain.com/v2/dockerhub"
    quay.io:
      endpoint:
        - "https://registry.domain.com/v2/quayio"
    ghcr.io:
      endpoint:
        - "https://registry.domain.com/v2/ghcrio"
    registry.domain.com:
      endpoint:
        - "https://registry.domain.com"

  configs:
    "registry.domain.com":
      auth:
        username: yourusername
        password: yourpassword

proxmox/ansible/inventory/full/01-hosts.yml (new file)
@@ -0,0 +1,6 @@
---
plugin: "cloud.terraform.terraform_provider"
# defaults to terraform, but we're using tofu
binary_path: "/usr/bin/tofu"
project_path: "../tf"

proxmox/ansible/inventory/full/02-hosts.yml (new file)
@@ -0,0 +1,2 @@
plugin: ansible.builtin.constructed
strict: true

proxmox/ansible/inventory/full/group_vars/all/main.yml (new file)
@@ -0,0 +1,3 @@
dns_server:
  admin_username: "{{ secrets.admin_username }}"
  admin_password: "{{ secrets.admin_password }}"

@@ -0,0 +1,3 @@
secrets:
  admin_username: admin
  admin_password: "r5qNySaG8VdNqhrAk5pIdbCr3+JeV1WqdvF4TPnUd3c="

proxmox/ansible/inventory/stingray/01-hosts.yml (new file)
@@ -0,0 +1,5 @@
---
plugin: "cloud.terraform.terraform_provider"
# defaults to terraform, but we're using tofu
binary_path: "/usr/bin/tofu"
project_path: "../tf"

proxmox/ansible/inventory/stingray/02-hosts.yml (new file)
@@ -0,0 +1,14 @@
plugin: constructed
strict: true

groups:
  swarm_managers: group_names | intersect(['stingray_manager', 'stingray']) | length == 2
  # haproxy only on the first manager.
  # Using the special variable "groups" doesn't work here (probably because
  # we're constructing it), so we can't do something like
  # `inventory_hostname == groups['stingray_manager'][0]`
  haproxy: group_names | intersect(['stingray_manager', 'stingray']) | length == 2 and
    "-01" in inventory_hostname
  swarm_workers: group_names | intersect(['stingray_worker', 'stingray']) | length == 2
  gluster_nodes: group_names | intersect(['stingray']) | length == 1
  swarm: group_names | intersect(['stingray']) | length == 1

proxmox/ansible/inventory/stingray/group_vars/all/.gitignore (new file)
@@ -0,0 +1 @@
secrets.yml

proxmox/ansible/inventory/stingray/group_vars/all/main.yml (new file)
@@ -0,0 +1,25 @@
ansible_user: ubuntu

app_domain_name: stingray.mnke.org

gluster_volume_path: /glusterfs/bricks
gluster_volume_name: gfs
gluster_mount_path: /mnt/gfs
device2_hdd_dev: /dev/sda

portainer_app_name: portainer
portainer_admin_password: "{{ secrets.portainer_admin_password }}"
portainer_agent_secret: "{{ secrets.portainer_agent_secret }}"

cf_dns_api_token: "{{ secrets.cf_dns_api_token }}"
cf_email: tonydu121@hotmail.com

traefik_listen_port: 80
traefik_secure_listen_port: 443
traefik_admin_port: 8080
traefik_admin_user: admin
traefik_admin_password: "{{ secrets.traefik_admin_password }}"
# staging or production for letsencrypt
traefik_tls_mode: production

gitea_primary_domain_name: git.mnke.org

proxmox/ansible/lvm.yml (new file)
@@ -0,0 +1,42 @@
---
- name: Create LVM and mount it
  hosts: lvm
  remote_user: ubuntu
  become: true
  vars:
    pv_disks: "{{ lvm.pv_disks }}"
    vg_name: "{{ lvm.vg_name }}"
    lv_name: "{{ lvm.lv_name }}"
    lv_size: "{{ lvm.lv_size }}"
    fs_type: "{{ lvm.fs_type }}"
    mount_path: "{{ lvm.mount_path }}"
  tasks:
    - name: Create a volume group
      community.general.lvg:
        vg: "{{ vg_name }}"
        pvs: "{{ pv_disks }}"
        pvresize: yes

    - name: Create Logical Volume for data persistence
      community.general.lvol:
        vg: "{{ vg_name }}"
        lv: "{{ lv_name }}"
        size: "{{ lv_size }}"

    - name: Create filesystem on LV
      community.general.filesystem:
        fstype: "{{ fs_type }}"
        resizefs: true
        dev: /dev/mapper/{{ vg_name }}-{{ lv_name }}

    - name: Get LV UUID
      ansible.builtin.command: lsblk /dev/mapper/{{ vg_name }}-{{ lv_name }} -no UUID
      register: lv_uuid
      changed_when: false

    - name: Mount created filesystem
      ansible.posix.mount:
        path: "{{ mount_path }}"
        src: UUID={{ lv_uuid.stdout }}
        state: mounted
        fstype: "{{ fs_type }}"
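A sketch of running it against the dolo storage nodes; the `lvm` group is the one constructed from `dolo_storage` in the inventory above:

```sh
ansible-playbook -i inventory/dolo lvm.yml
```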

proxmox/ansible/reset.yml (new file)
@@ -0,0 +1,25 @@
---
- name: Reset k3s cluster
  hosts: k3s_cluster
  gather_facts: true
  roles:
    - role: techno_tim.k3s_ansible.reset
      become: true
    - role: techno_tim.k3s_ansible.raspberrypi
      become: true
      vars: { state: absent }
  post_tasks:
    - name: Reboot and wait for node to come back up
      become: true
      ansible.builtin.reboot:
        reboot_command: "{{ custom_reboot_command | default(omit) }}"
        reboot_timeout: 3600

- name: Revert changes to Proxmox cluster
  hosts: proxmox
  gather_facts: true
  become: true
  remote_user: "{{ proxmox_lxc_ssh_user }}"
  roles:
    - role: techno_tim.k3s_ansible.reset_proxmox_lxc
      when: proxmox_lxc_configure

proxmox/ansible/roles/dns-server/defaults/main.yml (new file)
@@ -0,0 +1 @@
technitium_image: "technitium/dns-server:13.4.0"

proxmox/ansible/roles/dns-server/tasks/main.yml (new file)
@@ -0,0 +1,52 @@
- name: Ensure docker is installed
  apt:
    name:
      - docker.io
      - docker-compose-v2
    update_cache: yes
    state: present

- name: Set up directory
  become: true
  file:
    path: /etc/technitium
    state: directory
    mode: '0755'

- name: Copy compose file
  become: true
  template:
    src: docker-compose.yml.j2
    dest: /etc/technitium/docker-compose.yml
    mode: '0644'

- name: Create password file
  become: true
  copy:
    content: "{{ dns_server.admin_password }}"
    dest: /etc/technitium/password.txt
    mode: '0770'

- name: Pull Technitium image
  become: true
  community.docker.docker_image:
    name: "{{ technitium_image }}"
    source: pull

- name: Set resolv.conf
  become: true
  copy:
    content: nameserver 127.0.0.1
    dest: /etc/resolv.conf

- name: Disable systemd-resolved
  become: true
  service:
    name: systemd-resolved
    state: stopped
    enabled: false

- name: Start Technitium
  become: true
  community.docker.docker_compose_v2:
    project_src: /etc/technitium

@@ -0,0 +1,49 @@
services:
  dns-server:
    container_name: dns-server
    hostname: dns-server
    image: {{ technitium_image }}
    # For DHCP deployments, use "host" network mode and remove all the port mappings, including the ports array by commenting them
    # network_mode: "host"
    ports:
      - "5380:5380/tcp"   # DNS web console (HTTP)
      - "53443:53443/tcp" # DNS web console (HTTPS)
      - "53:53/udp"       # DNS service
      - "53:53/tcp"       # DNS service
      - "853:853/udp"     # DNS-over-QUIC service
      - "853:853/tcp"     # DNS-over-TLS service
      - "443:443/udp"     # DNS-over-HTTPS service (HTTP/3)
      - "443:443/tcp"     # DNS-over-HTTPS service (HTTP/1.1, HTTP/2)
      - "80:80/tcp"       # DNS-over-HTTP service (use with reverse proxy or certbot certificate renewal)
      - "8053:8053/tcp"   # DNS-over-HTTP service (use with reverse proxy)
      - "67:67/udp"       # DHCP service
    environment:
      - DNS_SERVER_DOMAIN=dns-server # The primary domain name used by this DNS Server to identify itself.
      # - DNS_SERVER_ADMIN_PASSWORD=password # DNS web console admin user password.
      - DNS_SERVER_ADMIN_PASSWORD_FILE=/password.txt # The path to a file that contains a plain text password for the DNS web console admin user.
      - DNS_SERVER_PREFER_IPV6=false # DNS Server will use IPv6 for querying whenever possible with this option enabled.
      # - DNS_SERVER_WEB_SERVICE_LOCAL_ADDRESSES=172.17.0.1,127.0.0.1 # Comma separated list of network interface IP addresses that you want the web service to listen on for requests. The "172.17.0.1" address is the built-in Docker bridge. The "[::]" is the default value if not specified. Note! This must be used only with "host" network mode.
      - DNS_SERVER_WEB_SERVICE_HTTP_PORT=5380 # The TCP port number for the DNS web console over HTTP protocol.
      # - DNS_SERVER_WEB_SERVICE_HTTPS_PORT=53443 # The TCP port number for the DNS web console over HTTPS protocol.
      # - DNS_SERVER_WEB_SERVICE_ENABLE_HTTPS=false # Enables HTTPS for the DNS web console.
      # - DNS_SERVER_WEB_SERVICE_USE_SELF_SIGNED_CERT=false # Enables self signed TLS certificate for the DNS web console.
      # - DNS_SERVER_OPTIONAL_PROTOCOL_DNS_OVER_HTTP=false # Enables DNS server optional protocol DNS-over-HTTP on TCP port 8053 to be used with a TLS terminating reverse proxy like nginx.
      # - DNS_SERVER_RECURSION=AllowOnlyForPrivateNetworks # Recursion options: Allow, Deny, AllowOnlyForPrivateNetworks, UseSpecifiedNetworkACL.
      # - DNS_SERVER_RECURSION_NETWORK_ACL=192.168.10.0/24, !192.168.10.2 # Comma separated list of IP addresses or network addresses to allow access. Add ! character at the start to deny access, e.g. !192.168.10.0/24 will deny entire subnet. The ACL is processed in the same order its listed. If no networks match, the default policy is to deny all except loopback. Valid only for `UseSpecifiedNetworkACL` recursion option.
      # - DNS_SERVER_RECURSION_DENIED_NETWORKS=1.1.1.0/24 # Comma separated list of IP addresses or network addresses to deny recursion. Valid only for `UseSpecifiedNetworkACL` recursion option. This option is obsolete and DNS_SERVER_RECURSION_NETWORK_ACL should be used instead.
      # - DNS_SERVER_RECURSION_ALLOWED_NETWORKS=127.0.0.1, 192.168.1.0/24 # Comma separated list of IP addresses or network addresses to allow recursion. Valid only for `UseSpecifiedNetworkACL` recursion option. This option is obsolete and DNS_SERVER_RECURSION_NETWORK_ACL should be used instead.
      # - DNS_SERVER_ENABLE_BLOCKING=false # Sets the DNS server to block domain names using Blocked Zone and Block List Zone.
      # - DNS_SERVER_ALLOW_TXT_BLOCKING_REPORT=false # Specifies if the DNS Server should respond with TXT records containing a blocked domain report for TXT type requests.
      # - DNS_SERVER_BLOCK_LIST_URLS= # A comma separated list of block list URLs.
      # - DNS_SERVER_FORWARDERS=1.1.1.1, 8.8.8.8 # Comma separated list of forwarder addresses.
      # - DNS_SERVER_FORWARDER_PROTOCOL=Tcp # Forwarder protocol options: Udp, Tcp, Tls, Https, HttpsJson.
      # - DNS_SERVER_LOG_USING_LOCAL_TIME=true # Enable this option to use local time instead of UTC for logging.
    volumes:
      - config:/etc/dns
      - /etc/technitium/password.txt:/password.txt
    restart: unless-stopped
    sysctls:
      - net.ipv4.ip_local_port_range=1024 65000

volumes:
  config:
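Once the role has run, a quick smoke test from the DNS host itself (recursion defaults to private networks, so any recursive lookup from the box should resolve):

```sh
# resolv.conf now points at the container; query it directly as well
dig @127.0.0.1 example.com +short
```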

proxmox/ansible/roles/docker-swarm/tasks/main.yml (new file)
@@ -0,0 +1,97 @@
---
- name: Create /etc/docker
  become: true
  file:
    path: /etc/docker
    state: directory
    mode: '0755'

- name: Set docker daemon settings
  become: true
  # Otherwise we risk conflicts on the host subnet
  copy:
    content: |
      {
        "default-address-pools": [
          {
            "base": "172.17.0.0/12",
            "size": 20
          },
          {
            "base": "172.168.0.0/16",
            "size": 24
          }
        ],
        "dns": ["10.0.123.123"]
      }
    dest: /etc/docker/daemon.json

- name: Install dependencies
  become: true
  apt:
    name:
      - python3
      - python3-pip
      - python3-docker
      - docker.io
      - docker-buildx
      - docker-compose-v2
      - python3-jsondiff
      - apache2-utils
    update_cache: yes

- name: Enable docker
  become: true
  service:
    name: docker
    state: started
    enabled: true

- name: (swarm_manager) Create ingress network
  # The ingress network conflicts with my subnet and ends up causing problems,
  # so we have to set a different subnet first
  when: inventory_hostname == groups.swarm_managers[0]
  become: true
  docker_network:
    name: ingress
    driver: overlay
    ingress: true
    scope: swarm
    ipam_config:
      - subnet: 172.254.0.0/16
        gateway: 172.254.0.1
    driver_options:
      # I'm honestly not completely sure what this does, but in the default
      # ingress network that's created during swarm initialization, this exists
      # and things don't seem to work without it.
      com.docker.network.driver.overlay.vxlanid_list: 4096

- name: (swarm_manager) Init swarm
  when: inventory_hostname == groups.swarm_managers[0]
  become: true
  docker_swarm:
    state: present
    advertise_addr: "{{ ansible_default_ipv4.address }}"
  register: manager_swarm_facts
  failed_when: manager_swarm_facts.failed

- when: inventory_hostname == groups.swarm_managers[0]
  set_fact:
    worker_join_token: "{{ manager_swarm_facts.swarm_facts.JoinTokens.Worker }}"

- name: (swarm_workers) Join swarm
  when: inventory_hostname in groups.swarm_workers
  become: true
  docker_swarm:
    state: join
    join_token: "{{ hostvars[groups.swarm_managers[0]].worker_join_token }}"
    advertise_addr: "{{ ansible_default_ipv4.address }}"
    remote_addrs: ["{{ hostvars[groups.swarm_managers[0]].ansible_default_ipv4.address }}"]

- name: Add user to docker group
  become: true
  user:
    name: "{{ ansible_user }}"
    groups:
      - docker

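After the role completes on every host, the swarm can be inspected from the first manager:

```sh
# All managers and workers should show up with STATUS Ready
docker node ls
```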
proxmox/ansible/roles/gluster/tasks/init.yml (new file)
@@ -0,0 +1,46 @@
---
- name: Check if Gluster volume is initialized
  become: true
  stat:
    path: "{{ gluster_volume_path }}/{{ inventory_hostname }}/brick"
  register: glustervolume

- name: Verify Gluster volume path
  become: true
  file:
    path: "{{ gluster_volume_path }}/{{ inventory_hostname }}/brick"
    state: directory

- name: Initialize Gluster Cluster (on first node)
  become: true
  when: not glustervolume.stat.exists and inventory_hostname == groups['gluster_nodes'][0]
  loop: "{{ groups['gluster_nodes'] }}"
  shell: gluster peer probe {{ item }}

- name: Create Gluster Volume (on first node)
  # TODO Make this scalable
  become: true
  when: not glustervolume.stat.exists and inventory_hostname == groups['gluster_nodes'][0]
  shell: >
    gluster volume create {{ gluster_volume_name }} \
    replica 3 \
    {{ groups['gluster_nodes'][0] }}:{{ gluster_volume_path }}/{{ groups['gluster_nodes'][0] }}/brick \
    {{ groups['gluster_nodes'][1] }}:{{ gluster_volume_path }}/{{ groups['gluster_nodes'][1] }}/brick \
    {{ groups['gluster_nodes'][2] }}:{{ gluster_volume_path }}/{{ groups['gluster_nodes'][2] }}/brick

- name: Secure Gluster Volume (on first node)
  become: true
  when: inventory_hostname == groups['gluster_nodes'][0]
  shell: >
    gluster volume set {{ gluster_volume_name }} auth.allow \
    {{ groups['gluster_nodes'][0] }},{{ groups['gluster_nodes'][1] }},{{ groups['gluster_nodes'][2] }}
  changed_when: false

- name: Start Gluster Volume (on first node)
  become: true
  when: not glustervolume.stat.exists and inventory_hostname == groups['gluster_nodes'][0]
  shell: gluster volume start {{ gluster_volume_name }}

- name: Wait 60s for Gluster volume to be replicated
  when: not glustervolume.stat.exists and inventory_hostname == groups['gluster_nodes'][0]
  shell: sleep 60
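A hedged verification once the play has finished, run on the first node (`gfs` is the `gluster_volume_name` configured in the stingray group vars above):

```sh
gluster volume info gfs    # should report Type: Replicate with 3 bricks
gluster volume status gfs  # all bricks online
```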

proxmox/ansible/roles/gluster/tasks/install.yml (new file)
@@ -0,0 +1,20 @@
---
- name: Install Gluster and dependencies
  become: true
  apt:
    pkg:
      - xfsprogs
      - attr
      - glusterfs-server
      - glusterfs-common
      - glusterfs-client
    state: present
    force_apt_get: true
    update_cache: yes

- name: Enable Gluster service
  become: true
  systemd:
    name: glusterd
    enabled: yes
    state: started

proxmox/ansible/roles/gluster/tasks/main.yml (new file)
@@ -0,0 +1,5 @@
---
- import_tasks: xfs.yml
- import_tasks: install.yml
- import_tasks: init.yml
- import_tasks: mount.yml

proxmox/ansible/roles/gluster/tasks/mount.yml (new file)
@@ -0,0 +1,16 @@
---
- name: Verify Gluster mount path
  become: true
  file:
    path: "{{ gluster_mount_path }}"
    state: directory

- name: Mount Gluster volume
  become: true
  # TODO: Make this scalable in case different # of replicas
  mount:
    path: "{{ gluster_mount_path }}"
    src: "localhost:/{{ gluster_volume_name }}"
    fstype: glusterfs
    opts: defaults,_netdev,backupvolfile-server=localhost
    state: mounted

proxmox/ansible/roles/gluster/tasks/xfs.yml (new file)
@@ -0,0 +1,20 @@
---
- name: Create XFS Path {{ gluster_volume_path }}/{{ inventory_hostname }}
  become: true
  file:
    path: "{{ gluster_volume_path }}/{{ inventory_hostname }}"
    state: directory

- name: Create an XFS filesystem on {{ device2_hdd_dev }}
  become: true
  filesystem:
    fstype: xfs
    dev: "{{ device2_hdd_dev }}"

- name: Mount XFS volume {{ device2_hdd_dev }} to {{ gluster_volume_path }}/{{ inventory_hostname }}
  become: true
  mount:
    path: "{{ gluster_volume_path }}/{{ inventory_hostname }}"
    src: "{{ device2_hdd_dev }}"
    fstype: xfs
    state: mounted

proxmox/ansible/roles/swarm-bootstrap/tasks/main.yml (new file)
@@ -0,0 +1,86 @@
---
- name: Verify stacks directory exists (on first swarm node)
  when: inventory_hostname == groups['swarm_managers'][0]
  file:
    path: "/home/{{ ansible_user }}/stacks/swarm-bootstrap"
    state: directory

- name: Verify bootstrap volume path (on first swarm node)
  become: true
  when: inventory_hostname == groups['swarm_managers'][0]
  file:
    path: "{{ item }}"
    state: directory
  loop:
    - "{{ gluster_mount_path }}/swarm-bootstrap/traefik/letsencrypt"
    - "{{ gluster_mount_path }}/swarm-bootstrap/traefik/secrets"
    - "{{ gluster_mount_path }}/swarm-bootstrap/portainer"
    - "{{ gluster_mount_path }}/swarm-bootstrap/gitea"

- name: Create CF secret
  become: true
  copy:
    content: "{{ cf_dns_api_token }}"
    dest: "{{ gluster_mount_path }}/swarm-bootstrap/traefik/secrets/cf-dns-api-token.secret"
    mode: '0740'

- name: Set DNS servers
  become: true
  tags: [set_dns_servers]
  copy:
    content: |
      [Resolve]
      DNS=10.0.123.123
      # FallbackDNS=1.1.1.1
    dest: /etc/systemd/resolved.conf
  register: dns_servers_configuration

- name: Restart systemd-resolved
  tags: [set_dns_servers]
  service:
    name: systemd-resolved
    state: restarted
  when: dns_servers_configuration.changed

- name: Generate Traefik admin password hash
  when: inventory_hostname == groups['swarm_managers'][0]
  # Double the dollar signs so docker compose doesn't treat the bcrypt hash
  # as variable interpolation when it lands in the stack file
  shell: echo $(htpasswd -nb {{ traefik_admin_user }} {{ traefik_admin_password }}) | sed -e s/\\$/\\$\\$/g
  register: traefikpassword
  changed_when: false

- name: Generate Portainer admin password hash
  when: inventory_hostname == groups['swarm_managers'][0]
  shell: echo $(htpasswd -nBb admin {{ portainer_admin_password }}) | cut -d ":" -f 2 | sed -e s/\\$/\\$\\$/g
  register: portainerpassword
  changed_when: false

- name: Create git user
  become: true
  user:
    name: git
    create_home: true
  register: git_user

- set_fact:
    portainer_htpasswd: "{{ portainerpassword.stdout }}"
    traefik_htpasswd: "{{ traefikpassword.stdout }}"
    git_user_id: "{{ git_user.uid }}"
    git_group_id: "{{ git_user.group }}"
  when: inventory_hostname == groups['swarm_managers'][0]

- name: Create docker-compose stack file (on first swarm node)
  when: inventory_hostname == groups['swarm_managers'][0]
  template:
    src: docker-stack.yml.j2
    dest: /home/{{ ansible_user }}/stacks/swarm-bootstrap/docker-stack.yml
    mode: '0755'

- name: Deploy stack from a compose file (on first swarm node)
  when: inventory_hostname == groups['swarm_managers'][0]
  become: true
  docker_stack:
    state: present
    name: swarm-bootstrap
    detach: false
    compose:
      - /home/{{ ansible_user }}/stacks/swarm-bootstrap/docker-stack.yml

@@ -0,0 +1,181 @@
networks:
  gitea:
    driver: overlay
    attachable: true
    name: gitea
  traefik:
    driver: overlay
    attachable: true
    name: traefik
  portainer:
    driver: overlay
    attachable: true
    name: portainer

volumes:
  gitea:
    driver: local
    driver_opts:
      o: bind
      type: none
      device: {{ gluster_mount_path }}/swarm-bootstrap/gitea
    name: gitea
  portainer_data:
    driver: local
    driver_opts:
      o: bind
      type: none
      device: {{ gluster_mount_path }}/swarm-bootstrap/portainer
    name: portainer_data
  traefik:
    driver: local
    driver_opts:
      o: bind
      type: none
      device: {{ gluster_mount_path }}/swarm-bootstrap/traefik
    name: traefik

secrets:
  cf_dns_api_token:
    file: "{{ gluster_mount_path }}/swarm-bootstrap/traefik/secrets/cf-dns-api-token.secret"

services:
  traefik:
    image: traefik:v3.3
    dns:
      - 1.1.1.1
    command:
      - "--log.level=DEBUG"
      - "--api.dashboard=true"
      # Allow invalid TLS certs internally
      - "--api.insecure=true"
      # Swarm settings
      - "--providers.swarm=true"
      - "--providers.swarm.exposedByDefault=false"
      - "--providers.swarm.endpoint=unix:///var/run/docker.sock"
      # HTTP
      - "--entrypoints.web.address=:{{ traefik_listen_port }}"
      # Redirect to HTTPS
      - "--entrypoints.web.http.redirections.entrypoint.to=websecure"
      - "--entrypoints.web.http.redirections.entrypoint.scheme=https"
      - "--entrypoints.websecure.address=:{{ traefik_secure_listen_port }}"
      # TLS
      - "--certificatesresolvers.letsencrypt.acme.dnschallenge=true"
      - "--certificatesresolvers.letsencrypt.acme.dnschallenge.provider=cloudflare"
      - "--certificatesresolvers.letsencrypt.acme.caserver={{ 'https://acme-v02.api.letsencrypt.org/directory' if traefik_tls_mode == 'production' else 'https://acme-staging-v02.api.letsencrypt.org/directory' }}"
      - "--certificatesresolvers.letsencrypt.acme.email={{ cf_email }}"
      - "--certificatesresolvers.letsencrypt.acme.storage=/data/letsencrypt/acme.json"
    ports:
      - "{{ traefik_listen_port }}:{{ traefik_listen_port }}"
      - "{{ traefik_secure_listen_port }}:{{ traefik_secure_listen_port }}"
      - "{{ traefik_admin_port }}:8080"
    secrets:
      - "cf_dns_api_token"
    environment:
      - "CLOUDFLARE_EMAIL={{ cf_email }}"
      - "CF_DNS_API_TOKEN_FILE=/run/secrets/cf_dns_api_token"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - traefik:/data
    networks:
      - traefik
    deploy:
      labels:
        - "traefik.enable=true"
        - "traefik.http.routers.api.rule=Host(`traefik.{{ app_domain_name }}`)"
        - "traefik.http.routers.api.service=api@internal"
        - "traefik.http.routers.api.middlewares=auth"
        # TODO: Store this in a secret
        # It's not thaaat big of a deal cuz it's hashed anyway though.
        - "traefik.http.middlewares.auth.basicauth.users={{ traefik_htpasswd }}"
        # Dummy service for Swarm port detection. The port can be any valid integer value.
        - "traefik.http.services.dummy-svc.loadbalancer.server.port=9999"
      mode: global
      placement:
        constraints: [node.role == manager]

  whoami:
    image: "traefik/whoami"
    networks:
      - traefik
    deploy:
      labels:
        - "traefik.enable=true"
        - "traefik.http.routers.whoami.rule=Host(`whoami.stingray.mnke.org`)"
        - "traefik.http.routers.whoami.entrypoints=websecure"
        - "traefik.http.routers.whoami.tls.certresolver=letsencrypt"
        - "traefik.http.services.whoami.loadbalancer.server.port=80"
        - "traefik.swarm.network=traefik"

  agent:
    image: portainer/agent:latest
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /var/lib/docker/volumes:/var/lib/docker/volumes
    networks:
      - portainer
    environment:
      AGENT_SECRET: {{ portainer_agent_secret }}
    deploy:
      mode: global
      placement:
        constraints: [node.platform.os == linux]

  portainer:
    image: portainer/portainer:latest
    command: "-H tcp://tasks.agent:9001 --tlsskipverify --bind :9000 --tunnel-port 8000 --admin-password {{ portainer_htpasswd }}"
    ports:
      - "9000:9000"
      - "8000:8000"
    volumes:
      - portainer_data:/data
    networks:
      - portainer
      - traefik
    environment:
      # TODO: Load this in a secret
      AGENT_SECRET: {{ portainer_agent_secret }}
    deploy:
      labels:
        - "traefik.enable=true"
        - "traefik.http.routers.portainer.rule=Host(`portainer.{{ app_domain_name }}`)"
        - "traefik.http.routers.portainer.entrypoints=websecure"
        - "traefik.http.routers.portainer.tls.certresolver=letsencrypt"
        - "traefik.http.services.portainer.loadbalancer.server.port=9000"
        - "traefik.swarm.network=traefik"
      mode: replicated
      replicas: 1
      placement:
        constraints: [node.role == manager]

  gitea:
    image: docker.io/gitea/gitea:1.23.1
    environment:
      - USER_UID={{ git_user_id }}
      - USER_GID={{ git_group_id }}
      - USER=git
      - GITEA_APP_NAME=mnke
      - GITEA__server__DOMAIN={{ gitea_primary_domain_name }}
      - GITEA__server__ROOT_URL=https://{{ gitea_primary_domain_name }}
    networks:
      - gitea
      - traefik
    volumes:
      - gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "3000:3000"
      - "222:22"
    deploy:
      labels:
        - "traefik.enable=true"
        - "traefik.http.routers.gitea.rule=Host(`git.{{ app_domain_name }}`) || Host(`{{ gitea_primary_domain_name }}`)"
        - "traefik.http.routers.gitea.entrypoints=websecure"
        - "traefik.http.routers.gitea.tls.certresolver=letsencrypt"
        - "traefik.http.services.gitea.loadbalancer.server.port=3000"
        - "traefik.swarm.network=traefik"
      mode: replicated
      replicas: 1
      placement:
        constraints: [node.role == manager]

proxmox/ansible/site.yml (new file)
@@ -0,0 +1,81 @@
---
- name: Pre tasks
  hosts: k3s_cluster
  pre_tasks:
    - name: Verify Ansible is version 2.11 or above. (If this fails you may need to update Ansible)
      ansible.builtin.assert:
        that: ansible_version.full is version_compare('2.11', '>=')
        msg: >
          "Ansible is out of date. See here for more info: https://docs.technotim.live/posts/ansible-automation/"
  tasks:
    - name: Disable multipathd for longhorn
      become: true
      service:
        name: "{{ item }}"
        state: stopped
        enabled: false
      loop:
        - multipathd.service
        - multipathd.socket

- name: Prepare Proxmox cluster
  hosts: proxmox
  gather_facts: true
  become: true
  environment: "{{ proxy_env | default({}) }}"
  roles:
    - role: techno_tim.k3s_ansible.proxmox_lxc
      when: proxmox_lxc_configure

- name: Prepare k3s nodes
  hosts: k3s_cluster
  gather_facts: true
  environment: "{{ proxy_env | default({}) }}"
  roles:
    - role: techno_tim.k3s_ansible.lxc
      become: true
      when: proxmox_lxc_configure
    - role: techno_tim.k3s_ansible.prereq
      become: true
    - role: techno_tim.k3s_ansible.download
      become: true
    - role: techno_tim.k3s_ansible.raspberrypi
      become: true
    - role: techno_tim.k3s_ansible.k3s_custom_registries
      become: true
      when: custom_registries

- name: Setup k3s servers
  hosts: master
  environment: "{{ proxy_env | default({}) }}"
  roles:
    - role: techno_tim.k3s_ansible.k3s_server
      when: not (skip_setup_servers | default(false))
      become: true

- name: Setup k3s agents
  hosts: node
  environment: "{{ proxy_env | default({}) }}"
  roles:
    - role: techno_tim.k3s_ansible.k3s_agent
      when: not (skip_setup_agents | default(false))
      become: true

- name: Configure k3s cluster
  hosts: master
  environment: "{{ proxy_env | default({}) }}"
  roles:
    - role: techno_tim.k3s_ansible.k3s_server_post
      become: true

- name: Storing kubeconfig in the playbook directory
  hosts: master
  environment: "{{ proxy_env | default({}) }}"
  tasks:
    - name: Copying kubeconfig from {{ hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] }}
      ansible.builtin.fetch:
        src: "{{ ansible_user_dir }}/.kube/config"
        dest: ./kubeconfig
        flat: true
      when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
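With a cluster inventory in place, the whole sequence runs in one shot; a sketch using the dolo inventory, assuming the collections above are installed and `../tf` has been applied:

```sh
# Brings up k3s end to end and drops a kubeconfig in the playbook directory
ansible-playbook -i inventory/dolo site.yml
```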

proxmox/ansible/swarm.yml (new file)
@@ -0,0 +1,7 @@
---
- name: Set up Docker Swarm
  hosts: swarm
  roles:
    - docker-swarm
    - gluster
    - swarm-bootstrap

proxmox/docker/stacks/db-ui/docker-stack.yml (new file)
@@ -0,0 +1,41 @@
version: '3.1'

networks:
  traefik:
    external: true

services:
  phpmyadmin:
    image: phpmyadmin
    environment:
      - PMA_HOST=${DB_HOST:-db.home.mnke.org}
    networks:
      - traefik
    deploy:
      labels:
        - "traefik.enable=true"
        - "traefik.http.routers.pma.rule=Host(`${PMA_HOST:-pma.stingray.mnke.org}`)"
        - "traefik.http.routers.pma.entrypoints=websecure"
        - "traefik.http.routers.pma.tls.certresolver=letsencrypt"
        - "traefik.http.services.pma.loadbalancer.server.port=80"
        - "traefik.swarm.network=traefik"
      mode: replicated
      replicas: 1

  pgadmin:
    image: dpage/pgadmin4
    environment:
      - PGADMIN_DEFAULT_EMAIL=${PGADMIN_EMAIL:-tony@mnke.org}
      - PGADMIN_DEFAULT_PASSWORD=${PGADMIN_DEFAULT_PASSWORD:-password}
    networks:
      - traefik
    deploy:
      labels:
        - "traefik.enable=true"
        - "traefik.http.routers.pgadmin.rule=Host(`${PGADMIN_HOST:-pgadmin.stingray.mnke.org}`)"
        - "traefik.http.routers.pgadmin.entrypoints=websecure"
        - "traefik.http.routers.pgadmin.tls.certresolver=letsencrypt"
        - "traefik.http.services.pgadmin.loadbalancer.server.port=80"
        - "traefik.swarm.network=traefik"
      mode: replicated
      replicas: 1

proxmox/docker/stacks/dockge/docker-stack.yml (new file)
@@ -0,0 +1,31 @@
version: "3.8"

networks:
  traefik:
    external: true

services:
  dockge:
    image: louislam/dockge:1
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ${DATA_DIRECTORY:-/mnt/gfs/dockge/data}:/app/data
      - /opt/stacks:/opt/stacks
    environment:
      # Tell Dockge where to find the stacks
      - DOCKGE_STACKS_DIR=/opt/stacks
    networks:
      - traefik
    deploy:
      labels:
        - "traefik.enable=true"
        - "traefik.http.routers.dockge.rule=Host(`${DOCKGE_HOST:-dockge.stingray.mnke.org}`)"
        - "traefik.http.routers.dockge.entrypoints=websecure"
        - "traefik.http.routers.dockge.tls.certresolver=letsencrypt"
        - "traefik.http.services.dockge.loadbalancer.server.port=5001"
        - "traefik.swarm.network=traefik"
      mode: replicated
      replicas: 1
      placement:
        constraints: [node.role == manager]

proxmox/k8s/helmfile.d/00-core.yaml (new file)
@@ -0,0 +1,83 @@
---
repositories:
  - name: traefik
    url: https://helm.traefik.io/traefik
  - name: jetstack
    url: https://charts.jetstack.io
  - name: rancher-stable
    url: https://releases.rancher.com/server-charts/stable
  - name: longhorn
    url: https://charts.longhorn.io
  - name: bitnami
    url: https://charts.bitnami.com/bitnami
  - name: nfs-subdir-external-provisioner
    url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
  - name: prometheus-community
    url: https://prometheus-community.github.io/helm-charts

environments:
  staging: &staging
    values:
      - ./values/globals/staging.yaml.gotmpl
  production:
  default: *staging

---
releases:
  - name: traefik
    namespace: {{ .Values.globals.traefik.namespace }}
    createNamespace: true
    chart: traefik/traefik
    values:
      - ./values/traefik/values.yaml.gotmpl

  - name: cert-manager
    namespace: {{ .Values.globals.certManager.namespace }}
    createNamespace: true
    chart: jetstack/cert-manager
    values:
      - ./values/cert-manager/values.yml

  - name: certs
    chart: ./charts/certs
    needs:
      - {{ .Values.globals.certManager.namespace }}/cert-manager
    values:
      - ./values/certs/values.yaml.gotmpl

  - name: nfs-subdir-external-provisioner
    namespace: {{ .Values.globals.nfsSubdirExternalProvisioner.namespace }}
    createNamespace: true
    # chart references are repo/chart, not namespace/chart
    chart: nfs-subdir-external-provisioner/nfs-subdir-external-provisioner
    values:
      - ./values/nfs-subdir-external-provisioner/values.yaml.gotmpl

  - name: rancher
    namespace: {{ .Values.globals.rancher.namespace }}
    createNamespace: true
    chart: rancher-stable/rancher
    needs:
      - {{ .Values.globals.certManager.namespace }}/cert-manager
    values:
      - ./values/rancher/values.yaml.gotmpl

  - name: longhorn
    namespace: {{ .Values.globals.longhorn.namespace }}
    createNamespace: true
    chart: longhorn/longhorn
    values:
      - ./values/longhorn/values.yaml.gotmpl
    needs:
      - {{ .Values.globals.rancher.namespace }}/rancher

  - name: kube-prometheus-stack
    namespace: {{ .Values.globals.kubePrometheusStack.namespace }}
    createNamespace: true
    chart: prometheus-community/kube-prometheus-stack
    needs:
      - {{ .Values.globals.certManager.namespace }}/cert-manager
      - {{ .Values.globals.longhorn.namespace }}/longhorn
    values:
      - ./values/kube-prometheus-stack/values.yaml.gotmpl

proxmox/k8s/helmfile.d/01-databases.yaml (new file)
@@ -0,0 +1,67 @@
---
repositories:
  - name: bitnami
    url: https://charts.bitnami.com/bitnami
  - name: runix
    url: https://helm.runix.net

environments:
  staging: &staging
    values:
      - ./values/globals/staging.yaml.gotmpl
  production:
  default: *staging

---
releases:
  - name: mysql
    namespace: {{ .Values.globals.mysql.namespace }}
    createNamespace: true
    chart: bitnami/mysql
    values:
      - ./values/mysql/values.yaml.gotmpl

  - name: phpmyadmin
    namespace: {{ .Values.globals.phpmyadmin.namespace }}
    createNamespace: true
    chart: bitnami/phpmyadmin
    values:
      - ./values/phpmyadmin/values.yaml.gotmpl
    needs:
      - {{ .Values.globals.mysql.namespace }}/mysql

  - name: postgres
    namespace: {{ .Values.globals.postgres.namespace }}
    createNamespace: true
    chart: bitnami/postgresql
    values:
      - ./values/postgres/values.yaml.gotmpl

  - name: pgadmin4
    namespace: {{ .Values.globals.pgadmin4.namespace }}
    createNamespace: true
    chart: runix/pgadmin4
    values:
      - ./values/pgadmin4/values.yaml.gotmpl
    needs:
      - {{ .Values.globals.postgres.namespace }}/postgres

  - name: init-dbs
    # It doesn't really matter where we put this, but I don't want it polluting
    # the default namespace
    namespace: init-dbs
    createNamespace: true
    chart: ./charts/init-dbs
    values:
      - ./values/init-dbs/values.yaml.gotmpl
    needs:
      - {{ .Values.globals.postgres.namespace }}/postgres
      - {{ .Values.globals.mysql.namespace }}/mysql

  - name: redis
    namespace: {{ .Values.globals.redis.namespace }}
    createNamespace: true
    chart: bitnami/redis
    values:
      - ./values/redis/values.yaml.gotmpl

proxmox/k8s/helmfile.d/02-applications.yaml (new file)
@@ -0,0 +1,64 @@
---
repositories:
  - name: gitlab
    url: https://charts.gitlab.io
  - name: bitnami
    url: https://charts.bitnami.com/bitnami
  - name: uptime-kuma
    url: https://helm.irsigler.cloud
  - name: authentik
    url: https://charts.goauthentik.io
  - name: harbor
    url: https://helm.goharbor.io

environments:
  staging: &staging
    values:
      - ./values/globals/staging.yaml.gotmpl
  production:
  default: *staging

---
releases:
  - name: uptime-kuma
    namespace: {{ .Values.globals.uptimeKuma.namespace }}
    createNamespace: true
    chart: uptime-kuma/uptime-kuma
    values:
      - ./values/uptime-kuma/values.yaml.gotmpl

  - name: authentik
    namespace: {{ .Values.globals.authentik.namespace }}
    createNamespace: true
    chart: authentik/authentik
    values:
      - ./values/authentik/values.yaml.gotmpl

  - name: argo-cd
    namespace: {{ .Values.globals.argocd.namespace }}
    createNamespace: true
    chart: bitnami/argo-cd
    values:
      - ./values/argo-cd/values.yaml.gotmpl

  - name: harbor
    namespace: {{ .Values.globals.harbor.namespace }}
    createNamespace: true
    chart: bitnami/harbor
    values:
      - ./values/harbor/values.yaml.gotmpl

  - name: ghost
    namespace: {{ .Values.globals.ghost.namespace }}
    createNamespace: true
    chart: bitnami/ghost
    values:
      - ./values/ghost/values.yaml.gotmpl

  - name: gitea
    installed: false
    namespace: {{ .Values.globals.gitea.namespace }}
    createNamespace: true
    chart: bitnami/gitea
    values:
      - ./values/gitea/values.yaml.gotmpl
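helmfile loads `helmfile.d/*.yaml` in lexical order, which is what the numeric prefixes encode: core infrastructure first, then databases, then applications. A sketch of applying the whole stack, assuming it is run from `proxmox/k8s` with the kubeconfig fetched by the playbook above active:

```sh
# Files apply in order: 00-core, 01-databases, 02-applications
helmfile -e staging apply
```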

proxmox/k8s/helmfile.d/charts/certs/.helmignore (new file)
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

proxmox/k8s/helmfile.d/charts/certs/Chart.yaml (new file)
@@ -0,0 +1,24 @@
apiVersion: v2
name: certs
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

proxmox/k8s/helmfile.d/charts/certs/templates/NOTES.txt (new file, empty)

proxmox/k8s/helmfile.d/charts/certs/templates/_helpers.tpl (new file)
@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "certs.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "certs.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "certs.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "certs.labels" -}}
helm.sh/chart: {{ include "certs.chart" . }}
{{ include "certs.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "certs.selectorLabels" -}}
app.kubernetes.io/name: {{ include "certs.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "certs.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "certs.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
@@ -0,0 +1,17 @@
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: {{ .Values.certificateName }}
  namespace: {{ .Values.certificateNamespace }}
spec:
  secretName: {{ .Values.certificateSecretName }}
  issuerRef:
    name: {{ .Values.issuerName | quote }}
    kind: ClusterIssuer
  commonName: {{ .Values.commonName | quote }}
  dnsNames:
  {{- range .Values.dnsNames }}
  - {{ . | quote }}
  {{- end}}
10
proxmox/k8s/helmfile.d/charts/certs/templates/cf-secret.yml
Normal file
@@ -0,0 +1,10 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Values.cloudflareTokenSecretName }}
  namespace: {{ .Values.certManagerNamespace }}
type: Opaque
stringData:
  cloudflare-token: {{ .Values.cloudflareSecretToken }}
@@ -0,0 +1,24 @@
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: {{ .Values.issuerName }}
spec:
  acme:
    server: {{- if eq .Values.issuerMode "staging" }} "https://acme-staging-v02.api.letsencrypt.org/directory" {{- else }} "https://acme-v02.api.letsencrypt.org/directory" {{- end }}
    email: {{ .Values.acmeEmail }}
    privateKeySecretRef:
      name: {{ .Values.privateKeySecretRef }}
    solvers:
    - dns01:
        cloudflare:
          email: {{ .Values.cloudflareEmail }}
          apiTokenSecretRef:
            name: {{ .Values.cloudflareTokenSecretName }}
            key: cloudflare-token
      selector:
        dnsZones:
        {{- range .Values.dnsZones }}
        - {{ . | quote }}
        {{- end}}
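For orientation, in staging mode this template renders to roughly the following manifest (a sketch, using the chart's default values shown in the next file):

```yaml
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt
spec:
  acme:
    server: "https://acme-staging-v02.api.letsencrypt.org/directory"
    email: tonydu121@hotmail.com
    privateKeySecretRef:
      name: letsencrypt
    solvers:
    - dns01:
        cloudflare:
          email: tonydu121@hotmail.com
          apiTokenSecretRef:
            name: cloudflare-token-secret
            key: cloudflare-token
      selector:
        dnsZones:
        - "mnke.org"
        - "*.mnke.org"
        - "*.hl.mnke.org"
```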
28
proxmox/k8s/helmfile.d/charts/certs/values.yaml
Normal file
@@ -0,0 +1,28 @@
acmeEmail: tonydu121@hotmail.com
cloudflareEmail: tonydu121@hotmail.com

# staging or production
issuerMode: staging

issuerName: letsencrypt
privateKeySecretRef: letsencrypt

certManagerNamespace: cert-manager

cloudflareSecretToken: redacted
cloudflareTokenSecretName: cloudflare-token-secret

dnsZones:
- "mnke.org"
- "*.mnke.org"
- "*.hl.mnke.org"

# TODO: Allow for multiple creation
certificateName: hl-mnke-org
certificateNamespace: default
certificateSecretName: hl-mnke-org-tls

commonName: "*.hl.mnke.org"
dnsNames:
- "hl.mnke.org"
- "*.hl.mnke.org"
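To sanity-check what the chart produces before it goes anywhere near the cluster, it can be rendered locally (a sketch; assumes it is run from the repo root):

```sh
# Render with defaults (staging issuer)
helm template certs proxmox/k8s/helmfile.d/charts/certs

# Render as it would look against the production Let's Encrypt endpoint
helm template certs proxmox/k8s/helmfile.d/charts/certs --set issuerMode=production
```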
23
proxmox/k8s/helmfile.d/charts/init-dbs/.helmignore
Normal file
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
24
proxmox/k8s/helmfile.d/charts/init-dbs/Chart.yaml
Normal file
@@ -0,0 +1,24 @@
apiVersion: v2
name: init-dbs
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"
@@ -0,0 +1,51 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "init-dbs.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "init-dbs.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "init-dbs.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "init-dbs.labels" -}}
helm.sh/chart: {{ include "init-dbs.chart" . }}
{{ include "init-dbs.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "init-dbs.selectorLabels" -}}
app.kubernetes.io/name: {{ include "init-dbs.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
112
proxmox/k8s/helmfile.d/charts/init-dbs/templates/jobs.yaml
Normal file
@@ -0,0 +1,112 @@
{{- range .Values.postgres.databases }}
---
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ include "init-dbs.fullname" $ }}-postgres-{{ .database }}
spec:
  template:
    spec:
      imagePullSecrets:
        {{- toYaml $.Values.imagePullSecrets | nindent 8 }}
      restartPolicy: OnFailure
      containers:
        - name: {{ include "init-dbs.fullname" $ }}-postgres-{{ .database }}
          image: {{ $.Values.postgres.image.ref }}
          imagePullPolicy: {{ $.Values.postgres.image.pullPolicy }}
          command:
            - /bin/sh
            - -c
          args:
            # If the username and database exist, whatever, just exit.
            # Yeah, if something else went wrong, we're still exiting with code 0,
            # but it should be fine.
            - |
              sleep 10s && \
              psql -h {{ $.Values.postgres.host }} -U $PGUSER postgres \
                -tc "SELECT 1 FROM pg_database WHERE datname = '$DATABASE'" |\
                grep -q 1 ||\
              psql -h {{ $.Values.postgres.host }} -U $PGUSER postgres \
                -c "CREATE USER $USERNAME WITH ENCRYPTED PASSWORD '$PASSWORD';" \
                -c "CREATE DATABASE $DATABASE WITH OWNER = $USERNAME;"
          env:
            - name: PGUSER
              valueFrom:
                secretKeyRef:
                  key: username
                  name: {{ include "init-dbs.fullname" $ }}-postgres
            - name: PGPASSWORD
              valueFrom:
                secretKeyRef:
                  key: password
                  name: {{ include "init-dbs.fullname" $ }}-postgres
            - name: USERNAME
              valueFrom:
                secretKeyRef:
                  key: {{ .database }}-username
                  name: {{ include "init-dbs.fullname" $ }}-postgres
            - name: PASSWORD
              valueFrom:
                secretKeyRef:
                  key: {{ .database }}-password
                  name: {{ include "init-dbs.fullname" $ }}-postgres
            - name: DATABASE
              valueFrom:
                secretKeyRef:
                  key: {{ .database }}-database
                  name: {{ include "init-dbs.fullname" $ }}-postgres
{{- end }}

{{- range .Values.mysql.databases }}
---
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ include "init-dbs.fullname" $ }}-mysql-{{ .database }}
spec:
  template:
    spec:
      imagePullSecrets:
        {{- toYaml $.Values.imagePullSecrets | nindent 8 }}
      restartPolicy: OnFailure
      containers:
        - name: {{ include "init-dbs.fullname" $ }}-mysql-{{ .database }}
          image: {{ $.Values.mysql.image.ref }}
          imagePullPolicy: {{ $.Values.mysql.image.pullPolicy }}
          command:
            - /bin/sh
            - -c
          args:
            - |
              sleep 10s && \
              mysql -h {{ $.Values.mysql.host }} -u $MYUSER mysql --password=$MYPASSWORD \
                -e "CREATE DATABASE IF NOT EXISTS $DATABASE;" \
                -e "CREATE USER IF NOT EXISTS '$USERNAME'@'%' IDENTIFIED BY '$PASSWORD';" \
                -e "GRANT ALL PRIVILEGES ON $DATABASE.* TO '$USERNAME'@'%';"
          env:
            - name: MYUSER
              valueFrom:
                secretKeyRef:
                  key: username
                  name: {{ include "init-dbs.fullname" $ }}-mysql
            - name: MYPASSWORD
              valueFrom:
                secretKeyRef:
                  key: password
                  name: {{ include "init-dbs.fullname" $ }}-mysql
            - name: USERNAME
              valueFrom:
                secretKeyRef:
                  key: {{ .database }}-username
                  name: {{ include "init-dbs.fullname" $ }}-mysql
            - name: PASSWORD
              valueFrom:
                secretKeyRef:
                  key: {{ .database }}-password
                  name: {{ include "init-dbs.fullname" $ }}-mysql
            - name: DATABASE
              valueFrom:
                secretKeyRef:
                  key: {{ .database }}-database
                  name: {{ include "init-dbs.fullname" $ }}-mysql
{{- end }}
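The Postgres job above is only best-effort idempotent: the `||` chain means that if the existence check itself errors, the job still falls through to the CREATE statements, and as the comment notes, a failure there still exits 0. A standalone sketch of the same check, with hypothetical host and credentials:

```sh
# Hypothetical values for illustration; in the Job these come from the chart's Secret.
export PGUSER=postgres PGPASSWORD=changeme
DATABASE=test

# Exit status of grep tells us whether the database already exists.
psql -h postgres.db.svc.cluster.local -U "$PGUSER" postgres \
  -tc "SELECT 1 FROM pg_database WHERE datname = '$DATABASE'" | grep -q 1 \
  && echo "exists, nothing to do" \
  || echo "missing, the CREATE USER/CREATE DATABASE statements would run here"
```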
@@ -0,0 +1,33 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "init-dbs.fullname" $ }}-postgres
  labels:
    {{- include "init-dbs.labels" $ | nindent 4 }}
type: Opaque
stringData:
  username: {{ .Values.postgres.username }}
  password: {{ .Values.postgres.password }}
  {{- range .Values.postgres.databases }}
  {{ .database }}-database: {{ .database }}
  {{ .database }}-username: {{ .username }}
  {{ .database }}-password: {{ .password }}
  {{- end }}

---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "init-dbs.fullname" $ }}-mysql
  labels:
    {{- include "init-dbs.labels" $ | nindent 4 }}
type: Opaque
stringData:
  username: {{ .Values.mysql.username }}
  password: {{ .Values.mysql.password }}
  {{- range .Values.mysql.databases }}
  {{ .database }}-database: {{ .database }}
  {{ .database }}-username: {{ .username }}
  {{ .database }}-password: {{ .password }}
  {{- end }}
36
proxmox/k8s/helmfile.d/charts/init-dbs/values.yaml
Normal file
@@ -0,0 +1,36 @@
# This is for the secrets for pulling an image from a private repository. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""

postgres:
  image:
    ref: postgres
    # This sets the pull policy for images.
    pullPolicy: IfNotPresent
  host: ""
  username: postgres
  password: ""
  databases:
    - database: test
      username: test
      password: test
    - database: test1
      username: test1
      password: test1
mysql:
  image:
    ref: mysql
    # This sets the pull policy for images.
    pullPolicy: IfNotPresent
  host: ""
  username: root
  password: ""
  databases:
    - database: test
      username: test
      password: test
    - database: test1
      username: test1
      password: test1
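The defaults above are placeholders; the hosts and passwords are meant to be overridden at install time. A minimal sketch (the release name, namespace, and host values here are assumptions):

```sh
helm install init-dbs proxmox/k8s/helmfile.d/charts/init-dbs \
  --namespace databases \
  --set postgres.host=postgres.databases.svc.cluster.local \
  --set postgres.password=changeme \
  --set mysql.host=mysql.databases.svc.cluster.local \
  --set mysql.password=changeme
```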
4190
proxmox/k8s/helmfile.d/values/argo-cd/values.yaml.gotmpl
Normal file
File diff suppressed because it is too large
1036
proxmox/k8s/helmfile.d/values/authentik/values.yaml.gotmpl
Normal file
File diff suppressed because it is too large
11
proxmox/k8s/helmfile.d/values/cert-manager/values.yml
Normal file
@@ -0,0 +1,11 @@
crds:
  enabled: true
replicaCount: 3
extraArgs:
  - --dns01-recursive-nameservers=1.1.1.1:53,9.9.9.9:53
  - --dns01-recursive-nameservers-only
podDnsPolicy: None
podDnsConfig:
  nameservers:
    - 1.1.1.1
    - 9.9.9.9
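This file is plain YAML rather than a `.gotmpl`, so it is handed to the cert-manager release as-is. A minimal sketch of the kind of helmfile entry that would consume it (the repository, release layout, and names here are assumptions, not taken from this commit):

```yaml
repositories:
  - name: jetstack
    url: https://charts.jetstack.io

releases:
  - name: cert-manager
    namespace: cert-manager
    chart: jetstack/cert-manager
    values:
      - values/cert-manager/values.yml
```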
28
proxmox/k8s/helmfile.d/values/certs/values.yaml.gotmpl
Normal file
@@ -0,0 +1,28 @@
acmeEmail: {{ .Values.globals.certs.acmeEmail }}
cloudflareEmail: {{ .Values.globals.certs.cloudflareEmail }}

# staging or production
issuerMode: {{ .Values.globals.certs.certIssuerMode }}

issuerName: {{ .Values.globals.certs.issuerName }}
privateKeySecretRef: {{ .Values.globals.certs.privateKeySecretRef }}

certManagerNamespace: {{ .Values.globals.certManager.namespace }}

cloudflareSecretToken: {{ .Values.globals.certs.cloudflareSecretToken }}
cloudflareTokenSecretName: {{ .Values.globals.certs.cloudflareTokenSecretName }}

dnsZones:
{{- range .Values.globals.certs.hlMnkeOrg.dnsZones }}
- {{ . | quote }}
{{- end}}

certificateName: {{ .Values.globals.certs.hlMnkeOrg.certificateName }}
certificateSecretName: {{ .Values.globals.certs.hlMnkeOrg.certificateSecretName }}
certificateNamespace: {{ .Values.globals.certs.hlMnkeOrg.certificateNamespace }}

commonName: {{ .Values.globals.certs.hlMnkeOrg.commonName }}
dnsNames:
{{- range .Values.globals.certs.hlMnkeOrg.dnsNames }}
- {{ . | quote }}
{{- end}}
876
proxmox/k8s/helmfile.d/values/ghost/values.yaml.gotmpl
Normal file
@@ -0,0 +1,876 @@
# Copyright Broadcom, Inc. All Rights Reserved.
# SPDX-License-Identifier: APACHE-2.0

## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass

## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
##
global:
  imageRegistry: ""
  ## E.g.
  ## imagePullSecrets:
  ##   - myRegistryKeySecretName
  ##
  imagePullSecrets: []
  defaultStorageClass: {{ .Values.globals.ghost.storageClass }}
  ## Security parameters
  ##
  security:
    ## @param global.security.allowInsecureImages Allows skipping image verification
    allowInsecureImages: false
  ## Compatibility adaptations for Kubernetes platforms
  ##
  compatibility:
    ## Compatibility adaptations for Openshift
    ##
    openshift:
      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
      ##
      adaptSecurityContext: auto
## @section Common parameters

## @param kubeVersion Override Kubernetes version
##
kubeVersion: ""
## @param nameOverride String to partially override common.names.fullname
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname
##
fullnameOverride: ""
## @param commonLabels Labels to add to all deployed objects
##
commonLabels: {}
## @param commonAnnotations Annotations to add to all deployed objects
##
commonAnnotations: {}
## @param clusterDomain Kubernetes cluster domain name
##
clusterDomain: cluster.local
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## Enable diagnostic mode in the deployment
##
diagnosticMode:
  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
  ##
  enabled: false
  ## @param diagnosticMode.command Command to override all containers in the deployment
  ##
  command:
    - sleep
  ## @param diagnosticMode.args Args to override all containers in the deployment
  ##
  args:
    - infinity
## @section Ghost Image parameters

## Bitnami Ghost image
## ref: https://hub.docker.com/r/bitnami/ghost/tags/
## @param image.registry [default: REGISTRY_NAME] Ghost image registry
## @param image.repository [default: REPOSITORY_NAME/ghost] Ghost image repository
## @skip image.tag Ghost image tag (immutable tags are recommended)
## @param image.digest Ghost image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param image.pullPolicy Ghost image pull policy
## @param image.pullSecrets Ghost image pull secrets
## @param image.debug Enable image debug mode
##
image:
  registry: docker.io
  repository: bitnami/ghost
  tag: 5.108.1-debian-12-r0
  digest: ""
  ## Specify an imagePullPolicy
  ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## e.g:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []
  ## Enable debug mode
  ##
  debug: true
## @section Ghost Configuration parameters
## Ghost settings based on environment variables
## ref: https://github.com/bitnami/containers/tree/main/bitnami/ghost#configuration

## @param ghostUsername Ghost user name
##
ghostUsername: user
## @param ghostPassword Ghost user password
## Defaults to a random 10-character alphanumeric string if not set
##
ghostPassword: "{{ .Values.globals.ghost.ghostPassword }}"
## @param existingSecret Name of existing secret containing Ghost credentials
## NOTE: Must contain key `ghost-password`
## NOTE: When it's set, the `ghostPassword` parameter is ignored
##
existingSecret: ""
## @param ghostEmail Ghost user email
##
ghostEmail: {{ .Values.globals.ghost.ghostEmail }}
## @param ghostBlogTitle Ghost Blog title
##
ghostBlogTitle: User's Blog
## @param ghostHost Ghost host to create application URLs
##
ghostHost: {{ .Values.globals.ghost.primaryHost }}
## @param ghostPath URL sub path where to serve the Ghost application
##
ghostPath: /
## @param ghostEnableHttps Configure Ghost to build application URLs using https
##
## This controls whether URLs like the home page button use HTTPS URL schemes.
## If you turn this on, things break: the reverse proxy (or any client)
## encounters a 301 redirect to https from Ghost, but Ghost doesn't actually
## serve HTTPS, so the reverse proxy ends up responding with an internal
## server error. We _do_ want HTTPS URLs in Ghost, so we need to turn this
## on, and for some baffling reason, if I turn this on (thereby making every
## request fail) AND THEN turn it off again (allowing normal traffic again),
## Ghost keeps generating HTTPS URLs. Maddening.
##
## - Tony
ghostEnableHttps: false
## SMTP mail delivery configuration
## ref: https://github.com/bitnami/containers/tree/main/bitnami/ghost/#smtp-configuration
## @param smtpHost SMTP server host
## @param smtpPort SMTP server port
## @param smtpUser SMTP username
## @param smtpPassword SMTP user password
## @param smtpService SMTP service
## @param smtpProtocol SMTP protocol (ssl or tls)
##
smtpHost: ""
smtpPort: ""
smtpUser: ""
smtpPassword: ""
smtpService: ""
smtpProtocol: ""
## @param smtpExistingSecret The name of an existing secret with SMTP credentials
## NOTE: Must contain key `smtp-password`
## NOTE: When it's set, the `smtpPassword` parameter is ignored
##
smtpExistingSecret: ""
## @param allowEmptyPassword Allow the container to be started with blank passwords
##
allowEmptyPassword: false
## @param ghostSkipInstall Skip performing the initial bootstrapping for Ghost
##
ghostSkipInstall: false
## @param command Override default container command (useful when using custom images)
##
command: []
## @param args Override default container args (useful when using custom images)
##
args: []
## @param extraEnvVars Array with extra environment variables to add to the Ghost container
## e.g:
## extraEnvVars:
##   - name: FOO
##     value: "bar"
##
extraEnvVars: []
## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars
##
extraEnvVarsCM: ""
## @param extraEnvVarsSecret Name of existing Secret containing extra env vars
##
extraEnvVarsSecret: ""
## @section Ghost deployment parameters

## @param replicaCount Number of Ghost replicas to deploy
## NOTE: ReadWriteMany PVC(s) are required if replicaCount > 1
##
replicaCount: 1
## @param updateStrategy.type Ghost deployment strategy type
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
## NOTE: Set it to `Recreate` if you use a PV that cannot be mounted on multiple pods
## e.g:
## updateStrategy:
##  type: RollingUpdate
##  rollingUpdate:
##    maxSurge: 25%
##    maxUnavailable: 25%
##
updateStrategy:
  type: RollingUpdate
## @param priorityClassName Ghost pod priority class name
##
priorityClassName: ""
## @param schedulerName Name of the k8s scheduler (other than default)
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param topologySpreadConstraints Topology Spread Constraints for pod assignment
## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
## The value is evaluated as a template
##
topologySpreadConstraints: []
## @param automountServiceAccountToken Mount Service Account token in pod
##
automountServiceAccountToken: false
## @param hostAliases Ghost pod host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @param extraVolumes Optionally specify extra list of additional volumes for Ghost pods
##
extraVolumes: []
## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for Ghost container(s)
##
extraVolumeMounts: []
## @param sidecars Add additional sidecar containers to the Ghost pod
## e.g:
## sidecars:
##   - name: your-image-name
##     image: your-image
##     imagePullPolicy: Always
##     ports:
##       - name: portname
##         containerPort: 1234
##
sidecars: []
## @param initContainers Add additional init containers to the Ghost pods
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
## e.g:
## initContainers:
##   - name: your-image-name
##     image: your-image
##     imagePullPolicy: Always
##     ports:
##       - name: portname
##         containerPort: 1234
##
initContainers: []
## Pod Disruption Budget configuration
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
## @param pdb.create Enable/disable a Pod Disruption Budget creation
## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty.
##
pdb:
  create: true
  minAvailable: ""
  maxUnavailable: ""
## @param lifecycleHooks Add lifecycle hooks to the Ghost deployment
##
lifecycleHooks: {}
## @param podLabels Extra labels for Ghost pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @param podAnnotations Annotations for Ghost pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
  ##
  type: ""
  ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set
  ##
  key: ""
  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set
  ## E.g.
  ## values:
  ##   - e2e-az1
  ##   - e2e-az2
  ##
  values: []
## @param affinity Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param nodeSelector Node labels for pod assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param tolerations Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Ghost containers' resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "medium"
## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
##   requests:
##     cpu: 2
##     memory: 512Mi
##   limits:
##     cpu: 3
##     memory: 1024Mi
##
resources: {}
## Container ports
## @param containerPorts.http Ghost HTTP container port
## @param containerPorts.https Ghost HTTPS container port
##
containerPorts:
  http: 2368
  https: 2368
## @param extraContainerPorts Optionally specify extra list of additional ports for WordPress container(s)
## e.g:
## extraContainerPorts:
##   - name: myservice
##     containerPort: 9090
##
extraContainerPorts: []
## Configure Pods Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param podSecurityContext.enabled Enabled Ghost pods' Security Context
## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param podSecurityContext.supplementalGroups Set filesystem extra groups
## @param podSecurityContext.fsGroup Set Ghost pod's Security Context fsGroup
##
podSecurityContext:
  enabled: true
  fsGroupChangePolicy: Always
  sysctls: []
  supplementalGroups: []
  fsGroup: 1001
## Configure Container Security Context (only main container)
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param containerSecurityContext.enabled Enabled containers' Security Context
## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param containerSecurityContext.privileged Set container's Security Context privileged
## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
containerSecurityContext:
  enabled: true
  seLinuxOptions: {}
  runAsUser: 1001
  runAsGroup: 1001
  runAsNonRoot: true
  privileged: false
  readOnlyRootFilesystem: true
  allowPrivilegeEscalation: false
  # capabilities:
  #   drop: ["ALL"]
  seccompProfile:
    type: "RuntimeDefault"
## Configure extra options for Ghost containers' liveness, readiness and startup probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param startupProbe.enabled Enable startupProbe
## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param startupProbe.periodSeconds Period seconds for startupProbe
## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param startupProbe.failureThreshold Failure threshold for startupProbe
## @param startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
  enabled: false
  initialDelaySeconds: 120
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 6
  successThreshold: 1
## @param livenessProbe.enabled Enable livenessProbe
## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param livenessProbe.periodSeconds Period seconds for livenessProbe
## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
  enabled: true
  initialDelaySeconds: 120
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 6
  successThreshold: 1
## @param readinessProbe.enabled Enable readinessProbe
## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param readinessProbe.periodSeconds Period seconds for readinessProbe
## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
  enabled: true
  initialDelaySeconds: 30
  periodSeconds: 5
  timeoutSeconds: 3
  failureThreshold: 6
  successThreshold: 1
## @param customLivenessProbe Custom livenessProbe that overrides the default one
##
customLivenessProbe: {}
## @param customReadinessProbe Custom readinessProbe that overrides the default one
#
# The default httpGet probe fails, and I think it's because of this:
# - We enabled ghostEnableHttps
# - The httpGet probe probes for http://xyz:2368, which then redirects to
#   https://xyz:2368
# - However, Ghost itself does not provide TLS. That option just makes HTTP
#   redirect to HTTPS
# - The probe is now expecting TLS, but Ghost is still sending regular HTTP
#   and the probe thus fails
#
# So we're just gonna do a TCP port check. The alternative is curl'ing and
# expecting a 301 response, but that doesn't seem much better than the TCP
# check, especially since it's so simple.
customReadinessProbe:
  exec:
    command:
      - "true"
  # tcpSocket:
  #   port: 2368

## @section Traffic Exposure Parameters

## Ghost service parameters
##
service:
  ## @param service.type Ghost service type
  ##
  type: ClusterIP
  ## @param service.ports.http Ghost service HTTP port
  ## @param service.ports.https Ghost service HTTPS port
  ##
  ports:
    http: 80
    https: 443
  ## Node ports to expose
  ## @param service.nodePorts.http Node port for HTTP
  ## @param service.nodePorts.https Node port for HTTPS
  ## NOTE: choose port between <30000-32767>
  ##
  nodePorts:
    http: ""
    https: ""
  ## @param service.clusterIP Ghost service Cluster IP
  ## e.g.:
  ## clusterIP: None
  ##
  clusterIP: ""
  ## @param service.loadBalancerIP Ghost service Load Balancer IP
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
  ##
  loadBalancerIP: ""
  ## @param service.loadBalancerSourceRanges Ghost service Load Balancer sources
  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
  ## e.g:
  ## loadBalancerSourceRanges:
  ##   - 10.10.10.0/24
  ##
  loadBalancerSourceRanges: []
  ## @param service.externalTrafficPolicy Ghost service external traffic policy
  ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
  ##
  externalTrafficPolicy: Cluster
  ## @param service.annotations Additional custom annotations for Ghost service
  ##
  annotations: {}
  ## @param service.extraPorts Extra port to expose on Ghost service
  ##
  extraPorts: []
  ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
  ## If "ClientIP", consecutive client requests will be directed to the same Pod
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
  ##
  sessionAffinity: None
  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
  ## sessionAffinityConfig:
  ##   clientIP:
  ##     timeoutSeconds: 300
  sessionAffinityConfig: {}
## Configure the ingress resource that allows you to access the Ghost installation
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
  ## @param ingress.enabled Enable ingress record generation for Ghost
  ##
  enabled: true
  ## @param ingress.pathType Ingress path type
  ##
  pathType: ImplementationSpecific
  ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
  ##
  apiVersion: ""
  ## @param ingress.hostname Default host for the ingress record
  ##
  hostname: {{ .Values.globals.ghost.primaryHost }}
  ## @param ingress.path Default path for the ingress record
  ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
  ##
  path: /
  ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
  ## For a full list of possible ingress annotations, please see
  ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
  ## Use this parameter to set the required annotations for cert-manager, see
  ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
  ##
  ## e.g:
  ## annotations:
  ##   kubernetes.io/ingress.class: nginx
  ##   cert-manager.io/cluster-issuer: cluster-issuer-name
  ##
  annotations:
    kubernetes.io/ingress.class: {{ .Values.globals.ghost.ingressClass }}
    cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
  ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
  ## TLS certificates will be retrieved from a TLS secret with name: `\{\{- printf "%s-tls" .Values.ingress.hostname \}\}`
  ## You can:
  ##   - Use the `ingress.secrets` parameter to create this TLS secret
  ##   - Rely on cert-manager to create it by setting the corresponding annotations
  ##   - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
  ##
  tls: true
  ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
  ##
  selfSigned: false
  ## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record
  ## e.g:
  ## extraHosts:
  ##   - name: ghost.local
  ##     path: /
  ##
  extraHosts: []
  ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
  ## e.g:
  ## extraPaths:
  ## - path: /*
  ##   backend:
  ##     serviceName: ssl-redirect
  ##     servicePort: use-annotation
  ##
  extraPaths: []
  ## @param ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
  ## e.g:
  ## extraTls:
  ## - hosts:
  ##     - ghost.local
  ##   secretName: ghost.local-tls
  ##
  extraTls: []
  ## @param ingress.secrets Custom TLS certificates as secrets
  ## NOTE: 'key' and 'certificate' are expected in PEM format
  ## NOTE: 'name' should line up with a 'secretName' set further up
  ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
  ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
  ## It is also possible to create and manage the certificates outside of this helm chart
  ## Please see README.md for more information
  ## e.g:
  ## secrets:
  ##   - name: ghost.local-tls
  ##     key: |-
  ##       REDACTED
  ##       ...
  ##       REDACTED
  ##     certificate: |-
  ##       -----BEGIN CERTIFICATE-----
  ##       ...
  ##       -----END CERTIFICATE-----
  ##
  secrets: []
  ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
  ##
  ingressClassName: {{ .Values.globals.ghost.ingressClass }}
  ## @param ingress.extraRules Additional rules to be covered with this ingress record
  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
  ## e.g:
  ## extraRules:
  ## - host: example.local
  ##   http:
  ##     path: /
  ##     backend:
  ##       service:
  ##         name: example-svc
  ##         port:
  ##           name: http
  ##
  extraRules: []
## @section Persistence Parameters

## Persistence Parameters
## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
##
persistence:
  ## @param persistence.enabled Enable persistence using Persistent Volume Claims
  ##
  enabled: true
  ## @param persistence.storageClass Persistent Volume storage class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
  ##
  storageClass: ""
  ## @param persistence.annotations Additional custom annotations for the PVC
  ##
  annotations: {}
  ## @param persistence.accessModes [array] Persistent Volume access modes
  ##
  accessModes:
    - ReadWriteMany
  ## @param persistence.size Persistent Volume size
  ##
  size: 8Gi
  ## @param persistence.existingClaim The name of an existing PVC to use for persistence
  ##
  existingClaim: ""
  ## @param persistence.subPath The name of a volume's sub path to mount for persistence
  ##
  subPath: ""
## 'volumePermissions' init container parameters
## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values
## based on the podSecurityContext/containerSecurityContext parameters
##
volumePermissions:
  ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup`
  ##
  enabled: false
  ## OS Shell + Utility image
  ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/
  ## @param volumePermissions.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry
  ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository
  ## @skip volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended)
  ## @param volumePermissions.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
  ## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy
  ## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets
  ##
  image:
    registry: docker.io
    repository: bitnami/os-shell
    tag: 12-debian-12-r35
    digest: ""
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ## e.g:
    ## pullSecrets:
    ##   - myRegistryKeySecretName
    ##
    pullSecrets: []
  ## Init container's resource requests and limits
  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
  ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
  ##
  resourcesPreset: "none"
  ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
  ## Example:
  ## resources:
  ##   requests:
  ##     cpu: 2
  ##     memory: 512Mi
  ##   limits:
  ##     cpu: 3
  ##     memory: 1024Mi
  ##
  resources: {}
  ## Init container Container Security Context
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
  ## @param volumePermissions.securityContext.seLinuxOptions [object,nullable] Set SELinux options in container
  ## @param volumePermissions.securityContext.runAsUser Set init container's Security Context runAsUser
  ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the
  ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
  ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed)
  ##
  securityContext:
    seLinuxOptions: {}
    runAsUser: 0
## @section Database Parameters

## MySQL chart configuration
## ref: https://github.com/bitnami/charts/blob/main/bitnami/mysql/values.yaml
##
mysql:
  ## @param mysql.enabled Deploy a MySQL server to satisfy the applications database requirements
  ## To use an external database set this to false and configure the `externalDatabase` parameters
  ##
  enabled: false
  ## @param mysql.architecture MySQL architecture. Allowed values: `standalone` or `replication`
  ##
  architecture: standalone
  ## MySQL Authentication parameters
  ## @param mysql.auth.rootPassword MySQL root password
  ## @param mysql.auth.database MySQL custom database
  ## @param mysql.auth.username MySQL custom user name
  ## @param mysql.auth.password MySQL custom user password
  ## @param mysql.auth.existingSecret Existing secret with MySQL credentials
  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mysql#setting-the-root-password-on-first-run
  ## https://github.com/bitnami/containers/tree/main/bitnami/mysql/#creating-a-database-on-first-run
  ## https://github.com/bitnami/containers/tree/main/bitnami/mysql/#creating-a-database-user-on-first-run
  auth:
    rootPassword: "password"
    database: bitnami_ghost
    username: bn_ghost
    password: "password"
    existingSecret: ""
  ## MySQL Primary configuration
  ##
  primary:
    ## MySQL Primary Persistence parameters
    ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
    ## @param mysql.primary.persistence.enabled Enable persistence on MySQL using PVC(s)
    ## @param mysql.primary.persistence.storageClass Persistent Volume storage class
    ## @param mysql.primary.persistence.accessModes [array] Persistent Volume access modes
    ## @param mysql.primary.persistence.size Persistent Volume size
    ##
    persistence:
      enabled: true
      storageClass: ""
      accessModes:
        - ReadWriteOnce
      size: 8Gi
    ## MySQL primary container's resource requests and limits
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
    ## We usually recommend not to specify default resources and to leave this as a conscious
    ## choice for the user. This also increases chances charts run on environments with little
    ## resources, such as Minikube. If you do want to specify resources, uncomment the following
    ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    ## @param mysql.primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production).
    ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
    ##
    resourcesPreset: "small"
    ## @param mysql.primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
    ## Example:
    ## resources:
    ##   requests:
    ##     cpu: 2
    ##     memory: 512Mi
    ##   limits:
    ##     cpu: 3
    ##     memory: 1024Mi
    ##
    resources: {}
## External Database Configuration
## All of these values are only used if `mysql.enabled=false`
##
externalDatabase:
  ## @param externalDatabase.host External Database server host
  ##
  host: mysql.{{ .Values.globals.mysql.namespace }}.svc.cluster.local
  ## @param externalDatabase.port External Database server port
  ##
  port: 3306
  ## @param externalDatabase.user External Database username
  ##
  user: {{ .Values.globals.ghost.mysql.username }}
  ## @param externalDatabase.password External Database user password
  ##
  password: {{ .Values.globals.ghost.mysql.password }}
  ## @param externalDatabase.database External Database database name
  ##
  database: {{ .Values.globals.ghost.mysql.database }}
  ## @param externalDatabase.existingSecret The name of an existing secret with database credentials
  ## NOTE: Must contain key `mysql-password`
  ## NOTE: When it's set, the `externalDatabase.password` parameter is ignored
  ##
  existingSecret: ""
  ## @param externalDatabase.ssl External Database ssl
  ##
  ssl: false
  ## @param externalDatabase.sslCaFile External Database ssl CA filepath
  ##
  sslCaFile: ""
## @section NetworkPolicy parameters

## Network Policy configuration
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
  ##
  enabled: true
  ## @param networkPolicy.allowExternal Don't require server label for connections
  ## The Policy model to apply. When set to false, only pods with the correct
  ## server label will have network access to the ports server is listening
  ## on. When true, server will accept connections from any source
  ## (with the correct destination port).
  ##
  allowExternal: true
  ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
  ##
  allowExternalEgress: true
  ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
  ## e.g:
  ## extraIngress:
  ##   - ports:
  ##       - port: 1234
  ##     from:
  ##       - podSelector:
  ##           - matchLabels:
  ##               - role: frontend
  ##       - podSelector:
  ##           - matchExpressions:
  ##               - key: role
  ##                 operator: In
  ##                 values:
  ##                   - frontend
  extraIngress: []
  ## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
  ## e.g:
  ## extraEgress:
  ##   - ports:
  ##       - port: 1234
  ##     to:
  ##       - podSelector:
  ##           - matchLabels:
  ##               - role: frontend
  ##       - podSelector:
  ##           - matchExpressions:
  ##               - key: role
  ##                 operator: In
  ##                 values:
  ##                   - frontend
  ##
  extraEgress: []
  ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
  ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
  ##
  ingressNSMatchLabels: {}
  ingressNSPodMatchLabels: {}

## Pods Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
## @param serviceAccount.create Specifies whether a ServiceAccount should be created
## @param serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template.
## @param serviceAccount.automountServiceAccountToken Automount service account token for the server service account
## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
##
serviceAccount:
  create: true
  name: ""
  automountServiceAccountToken: false
  annotations: {}

782
proxmox/k8s/helmfile.d/values/gitea/values.yaml.gotmpl
Normal file
782
proxmox/k8s/helmfile.d/values/gitea/values.yaml.gotmpl
Normal file
@ -0,0 +1,782 @@
# Copyright Broadcom, Inc. All Rights Reserved.
# SPDX-License-Identifier: APACHE-2.0

## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
##

## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead
##
global:
  imageRegistry: ""
  ## E.g.
  ## imagePullSecrets:
  ##   - myRegistryKeySecretName
  ##
  imagePullSecrets: []
  defaultStorageClass: ""
  storageClass: ""
  ## Security parameters
  ##
  security:
    ## @param global.security.allowInsecureImages Allows skipping image verification
    allowInsecureImages: false
  ## Compatibility adaptations for Kubernetes platforms
  ##
  compatibility:
    ## Compatibility adaptations for Openshift
    ##
    openshift:
      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
      ##
      adaptSecurityContext: auto
## @section Common parameters
##

## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
##
kubeVersion: ""
## @param nameOverride String to partially override gitea.fullname template (will maintain the release name)
##
nameOverride: ""
## @param fullnameOverride String to fully override gitea.fullname template
##
fullnameOverride: ""
## @param namespaceOverride String to fully override common.names.namespace
##
namespaceOverride: ""
## @param commonAnnotations Common annotations to add to all Gitea resources (sub-charts are not considered). Evaluated as a template
##
commonAnnotations: {}
## @param commonLabels Common labels to add to all Gitea resources (sub-charts are not considered). Evaluated as a template
##
commonLabels: {}
## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template).
##
extraDeploy: []
## @section Gitea parameters
##

## Bitnami Gitea image version
## ref: https://hub.docker.com/r/bitnami/gitea/tags/
## @param image.registry [default: REGISTRY_NAME] Gitea image registry
## @param image.repository [default: REPOSITORY_NAME/gitea] Gitea Image name
## @skip image.tag Gitea Image tag
## @param image.digest Gitea image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param image.pullPolicy Gitea image pull policy
## @param image.pullSecrets Specify docker-registry secret names as an array
## @param image.debug Specify if debug logs should be enabled
##
image:
  registry: docker.io
  repository: bitnami/gitea
  tag: 1.23.1-debian-12-r3
  digest: ""
  ## Specify an imagePullPolicy
  ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## e.g:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []
  ## Set to true if you would like to see extra information on logs
  ##
  debug: false
## @param adminUsername User of the application
## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
##
adminUsername: bn_user
## @param adminPassword Application password
## Defaults to a random 10-character alphanumeric string if not set
## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
##
adminPassword: ""
## @param adminEmail Admin email
## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
##
adminEmail: user@example.com
## @param appName Gitea application name
## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
##
appName: example
## @param runMode Gitea run mode
## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
##
runMode: prod
## @param exposeSSH Make the SSH server accessible
##
exposeSSH: true
## @param rootURL UI Root URL (for link generation)
##
rootURL: ""
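## A hedged example (the hostname is illustrative, matching the ingress
## default further down); set this if links in the UI and clone URLs should
## use a fixed external address:
## rootURL: https://gitea.local/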
## @param command Override default container command (useful when using custom images)
##
command: []
## @param args Override default container args (useful when using custom images)
##
args: []
## @param updateStrategy.type Update strategy - only really applicable for deployments with RWO PVs attached
## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
## terminate the single previous pod, so that the new, incoming pod can attach to the PV
##
updateStrategy:
  type: RollingUpdate
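  ## A minimal sketch of the alternative described above (assuming a single
  ## replica bound to a ReadWriteOnce PV): "Recreate" terminates the old pod
  ## first, releasing the volume so the incoming pod can attach it.
  ## updateStrategy:
  ##   type: Recreate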
## @param priorityClassName Gitea pods' priorityClassName
##
priorityClassName: ""
## @param schedulerName Name of the k8s scheduler (other than default)
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param topologySpreadConstraints Topology Spread Constraints for pod assignment
## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
## The value is evaluated as a template
##
topologySpreadConstraints: []
## @param automountServiceAccountToken Mount Service Account token in pod
##
automountServiceAccountToken: false
## @param hostAliases [array] Add deployment host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @param extraEnvVars Extra environment variables
## For example:
##
extraEnvVars: []
# - name: BEARER_AUTH
#   value: "true"
## @param extraEnvVarsCM ConfigMap containing extra env vars
##
extraEnvVarsCM: ""
## @param extraEnvVarsSecret Secret containing extra env vars (in case of sensitive data)
##
extraEnvVarsSecret: ""
## @param extraVolumes Array of extra volumes to be added to the deployment (evaluated as template). Requires setting `extraVolumeMounts`
##
extraVolumes: []
## @param extraVolumeMounts Array of extra volume mounts to be added to the container (evaluated as template). Normally used with `extraVolumes`.
##
extraVolumeMounts: []
## @param initContainers Add additional init containers to the pod (evaluated as a template)
##
initContainers: []
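## A hedged sketch of one possible init container (the name and the busybox
## image are illustrative, not part of the chart): block startup until the
## external database configured further down answers on its port.
## initContainers:
##   - name: wait-for-postgres
##     image: busybox
##     command: ['sh', '-c', 'until nc -z postgres-postgresql.db.svc.cluster.local 5432; do sleep 2; done']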
## Pod Disruption Budget configuration
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
## @param pdb.create Enable/disable a Pod Disruption Budget creation
## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty.
##
pdb:
  create: true
  minAvailable: ""
  maxUnavailable: ""
## @param sidecars Attach additional containers to the pod (evaluated as a template)
##
sidecars: []
## @param tolerations Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param existingSecret Name of a secret with the application password
##
existingSecret: ""
## @param existingSecretKey Key inside the existing secret containing the password
##
existingSecretKey: "admin-password"
## SMTP mail delivery configuration
## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea/#smtp-configuration
## @param smtpHost SMTP host
## @param smtpPort SMTP port
## @param smtpUser SMTP user
## @param smtpPassword SMTP password
##
smtpHost: ""
smtpPort: ""
smtpUser: ""
smtpPassword: ""
## @param smtpExistingSecret The name of an existing secret with SMTP credentials
## NOTE: Must contain key `smtp-password`
## NOTE: When it's set, the `smtpPassword` parameter is ignored
##
smtpExistingSecret: ""
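## A sketch of a matching Secret (the name "gitea-smtp" is illustrative; only
## the `smtp-password` key noted above is required by the chart):
## apiVersion: v1
## kind: Secret
## metadata:
##   name: gitea-smtp
## stringData:
##   smtp-password: <smtp-password>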
## @param containerPorts [object] Container ports
##
containerPorts:
  http: 3000
  ssh: 2222
## @param extraContainerPorts Optionally specify extra list of additional ports for Gitea container(s)
## e.g:
## extraContainerPorts:
##   - name: myservice
##     containerPort: 9090
##
extraContainerPorts: []
## Enable OpenID Configurations
## @param openid.enableSignIn Enable sign in with OpenID
## @param openid.enableSignUp Enable sign up with OpenID
openid:
  enableSignIn: false
  enableSignUp: false
## Enable persistence using Persistent Volume Claims
## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
##
persistence:
  ## @param persistence.enabled Enable persistence using PVC
  ##
  enabled: true
  ## @param persistence.storageClass PVC Storage Class for Gitea volume
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner. (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack)
  ##
  storageClass: ""
  ## @param persistence.accessModes PVC Access Mode for Gitea volume
  ## Requires persistence.enabled: true
  ## If defined, PVC must be created manually before volume will be bound
  ##
  accessModes:
    - ReadWriteOnce
  ## @param persistence.size PVC Storage Request for Gitea volume
  ##
  size: 8Gi
  ## @param persistence.dataSource Custom PVC data source
  ##
  dataSource: {}
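  ## e.g., to clone the volume from an existing VolumeSnapshot (the snapshot
  ## name is illustrative):
  ## dataSource:
  ##   name: gitea-data-snapshot
  ##   kind: VolumeSnapshot
  ##   apiGroup: snapshot.storage.k8s.io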
  ## @param persistence.existingClaim A manually managed Persistent Volume Claim
  ## Requires persistence.enabled: true
  ## If defined, PVC must be created manually before volume will be bound
  ##
  existingClaim: ""
  ## @param persistence.hostPath If defined, the gitea-data volume will mount to the specified hostPath.
  ## Requires persistence.enabled: true
  ## Requires persistence.existingClaim: nil|false
  ## Default: nil.
  ##
  hostPath: ""
  ## @param persistence.annotations Persistent Volume Claim annotations
  ##
  annotations: {}
  ## @param persistence.selector Selector to match an existing Persistent Volume for Gitea data PVC
  ## If set, the PVC can't have a PV dynamically provisioned for it
  ## E.g.
  ## selector:
  ##   matchLabels:
  ##     app: my-app
  ##
  selector: {}
## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
##
nodeAffinityPreset:
  type: ""
  ## E.g.
  ## key: "kubernetes.io/e2e-az-name"
  ##
  key: ""
  ## E.g.
  ## values:
  ##   - e2e-az1
  ##   - e2e-az2
  ##
  values: []
## @param affinity Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param nodeSelector Node labels for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## Gitea container's resource requests and limits
## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "micro"
## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
##   requests:
##     cpu: 2
##     memory: 512Mi
##   limits:
##     cpu: 3
##     memory: 1024Mi
##
resources: {}
## Configure Pods Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param podSecurityContext.enabled Enable Gitea pods' Security Context
## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param podSecurityContext.supplementalGroups Set filesystem extra groups
## @param podSecurityContext.fsGroup Gitea pods' group ID
##
podSecurityContext:
  enabled: true
  fsGroupChangePolicy: Always
  sysctls: []
  supplementalGroups: []
  fsGroup: 1001
## Configure Container Security Context (only main container)
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param containerSecurityContext.enabled Enable containers' Security Context
## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param containerSecurityContext.privileged Set container's Security Context privileged
## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
##
containerSecurityContext:
  enabled: true
  seLinuxOptions: {}
  runAsUser: 1001
  runAsGroup: 1001
  runAsNonRoot: true
  privileged: false
  readOnlyRootFilesystem: true
  allowPrivilegeEscalation: false
  capabilities:
    drop: ["ALL"]
  seccompProfile:
    type: "RuntimeDefault"
## Configure extra options for startup probe
## Gitea core exposes / to unauthenticated requests, making it a good
## default startup and readiness path. However, that may not always be the
## case. For example, if the image value is overridden to an image containing a
## module that alters that route, or an image that does not auto-install Gitea.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param startupProbe.enabled Enable startupProbe
## @param startupProbe.path Request path for startupProbe
## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param startupProbe.periodSeconds Period seconds for startupProbe
## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param startupProbe.failureThreshold Failure threshold for startupProbe
## @param startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
  enabled: false
  path: /
  initialDelaySeconds: 600
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 5
  successThreshold: 1
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param livenessProbe.enabled Enable livenessProbe
## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param livenessProbe.periodSeconds Period seconds for livenessProbe
## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
  enabled: true
  initialDelaySeconds: 600
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 5
  successThreshold: 1
## Configure extra options for readiness probe
## Gitea core exposes / to unauthenticated requests, making it a good
## default startup and readiness path. However, that may not always be the
## case. For example, if the image value is overridden to an image containing a
## module that alters that route, or an image that does not auto-install Gitea.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param readinessProbe.enabled Enable readinessProbe
## @param readinessProbe.path Request path for readinessProbe
## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param readinessProbe.periodSeconds Period seconds for readinessProbe
## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
  enabled: true
  path: /
  initialDelaySeconds: 30
  periodSeconds: 5
  timeoutSeconds: 1
  failureThreshold: 5
  successThreshold: 1
## @param customStartupProbe Override default startup probe
##
customStartupProbe: {}
## @param customLivenessProbe Override default liveness probe
##
customLivenessProbe: {}
## @param customReadinessProbe Override default readiness probe
##
customReadinessProbe: {}
## @param lifecycleHooks LifecycleHook to set additional configuration at startup. Evaluated as a template
##
lifecycleHooks: {}
## @param podAnnotations Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param podLabels Add additional labels to the pod (evaluated as a template)
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @section Traffic Exposure Parameters
##

## Kubernetes configuration. For minikube, set this to NodePort, elsewhere use LoadBalancer
##
service:
  ## @param service.type Kubernetes Service type
  ##
  type: LoadBalancer
  ## @param service.ports.http Service HTTP port
  ## @param service.ports.ssh Service SSH port
  ##
  ports:
    http: 80
    ssh: 22
  ## @param service.loadBalancerSourceRanges Restricts access for LoadBalancer (only with `service.type: LoadBalancer`)
  ## e.g:
  ## loadBalancerSourceRanges:
  ##   - 0.0.0.0/0
  ##
  loadBalancerSourceRanges: []
  ## @param service.loadBalancerIP loadBalancerIP for the Gitea Service (optional, cloud specific)
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
  ##
  loadBalancerIP: ""
  ## @param service.nodePorts [object] Kubernetes node port
  ## nodePorts:
  ##   http: <to set explicitly, choose port between 30000-32767>
  ##   ssh: <to set explicitly, choose port between 30000-32767>
  ##
  nodePorts:
    http: ""
    ssh: ""
  ## @param service.externalTrafficPolicy Enable client source IP preservation
  ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
  ##
  externalTrafficPolicy: Cluster
  ## @param service.clusterIP Gitea service Cluster IP
  ## e.g.:
  ## clusterIP: None
  ##
  clusterIP: ""
  ## @param service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
  ##
  extraPorts: []
  ## @param service.annotations Additional custom annotations for Gitea service
  ##
  annotations: {}
  ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
  ## If "ClientIP", consecutive client requests will be directed to the same Pod
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
  ##
  sessionAffinity: None
  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
  ## sessionAffinityConfig:
  ##   clientIP:
  ##     timeoutSeconds: 300
  ##
  sessionAffinityConfig: {}

## Network Policy configuration
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
  ##
  enabled: true
  ## @param networkPolicy.allowExternal Don't require server label for connections
  ## The Policy model to apply. When set to false, only pods with the correct
  ## server label will have network access to the ports the server is listening
  ## on. When true, the server will accept connections from any source
  ## (with the correct destination port).
  ##
  allowExternal: true
  ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
  ##
  allowExternalEgress: true
  ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
  ## e.g:
  ## extraIngress:
  ##   - ports:
  ##       - port: 1234
  ##     from:
  ##       - podSelector:
  ##           matchLabels:
  ##             role: frontend
  ##       - podSelector:
  ##           matchExpressions:
  ##             - key: role
  ##               operator: In
  ##               values:
  ##                 - frontend
  extraIngress: []
  ## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
  ## e.g:
  ## extraEgress:
  ##   - ports:
  ##       - port: 1234
  ##     to:
  ##       - podSelector:
  ##           matchLabels:
  ##             role: frontend
  ##       - podSelector:
  ##           matchExpressions:
  ##             - key: role
  ##               operator: In
  ##               values:
  ##                 - frontend
  ##
  extraEgress: []
  ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
  ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
  ##
  ingressNSMatchLabels: {}
  ingressNSPodMatchLabels: {}

## Configure the ingress resource that allows you to access the
## Gitea installation. Set up the URL
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
  ## @param ingress.enabled Enable ingress controller resource
  ##
  enabled: false
  ## @param ingress.pathType Ingress Path type
  ##
  pathType: ImplementationSpecific
  ## @param ingress.apiVersion Override API Version (automatically detected if not set)
  ##
  apiVersion: ""
  ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
  ##
  ingressClassName: ""
  ## @param ingress.hostname Default host for the ingress resource
  ##
  hostname: "gitea.local"
  ## @param ingress.path The Path to Gitea. You may need to set this to '/*' in order to use this
  ## with ALB ingress controllers.
  ##
  path: /
  ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
  ## For a full list of possible ingress annotations, please see
  ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
  ## Use this parameter to set the required annotations for cert-manager, see
  ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
  ##
  ## e.g:
  ## annotations:
  ##   kubernetes.io/ingress.class: nginx
  ##   cert-manager.io/cluster-issuer: cluster-issuer-name
  ##
  annotations: {}
  ## @param ingress.tls Enable TLS configuration for the hostname defined at ingress.hostname parameter
  ## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
  ##
  tls: false
  ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
  ##
  selfSigned: false
  ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
  ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
  ## extraHosts:
  ##   - name: gitea.local
  ##     path: /
  ##
  extraHosts: []
  ## @param ingress.extraPaths Any additional arbitrary paths that may need to be added to the ingress under the main host.
  ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
  ## extraPaths:
  ##   - path: /*
  ##     backend:
  ##       serviceName: ssl-redirect
  ##       servicePort: use-annotation
  ##
  extraPaths: []
  ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
  ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
  ## extraTls:
  ##   - hosts:
  ##       - gitea.local
  ##     secretName: gitea.local-tls
  ##
  extraTls: []
  ## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
  ## key and certificate should start with -----BEGIN CERTIFICATE----- or
  ## REDACTED
  ##
  ## name should line up with a tlsSecret set further up
  ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
  ##
  ## It is also possible to create and manage the certificates outside of this helm chart
  ## Please see README.md for more information
  ## Example:
  ## - name: gitea.local-tls
  ##   key:
  ##   certificate:
  ##
  secrets: []
  ## @param ingress.extraRules Additional rules to be covered with this ingress record
  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
  ## e.g:
  ## extraRules:
  ##   - host: example.local
  ##     http:
  ##       path: /
  ##       backend:
  ##         service:
  ##           name: example-svc
  ##           port:
  ##             name: http
  ##
  extraRules: []
## @section Other Parameters
##

## Service account for Gitea to use.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  ## @param serviceAccount.create Enable creation of ServiceAccount for Gitea pod
  ##
  create: true
  ## @param serviceAccount.name The name of the ServiceAccount to use.
  ## If not set and create is true, a name is generated using the common.names.fullname template
  ##
  name: ""
  ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
  ## Can be set to false if pods using this serviceAccount do not need to use K8s API
  ##
  automountServiceAccountToken: false
  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
  ##
  annotations: {}
## @section Database parameters
##

## PostgreSQL chart configuration
## ref: https://github.com/bitnami/charts/blob/main/bitnami/postgresql/values.yaml
## @param postgresql.enabled Switch to enable or disable the PostgreSQL helm chart
## @param postgresql.auth.username Name for a custom user to create
## @param postgresql.auth.password Password for the custom user to create
## @param postgresql.auth.database Name for a custom database to create
## @param postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials
## @param postgresql.architecture PostgreSQL architecture (`standalone` or `replication`)
## @param postgresql.service.ports.postgresql PostgreSQL service port
##
postgresql:
  enabled: false
  auth:
    username: bn_gitea
    password: ""
    database: bitnami_gitea
    existingSecret: ""
  architecture: standalone
  service:
    ports:
      postgresql: 5432
## External PostgreSQL configuration
## All of these values are only used when postgresql.enabled is set to false
## @param externalDatabase.host Database host
## @param externalDatabase.port Database port number
## @param externalDatabase.user Non-root username for Gitea
## @param externalDatabase.password Password for the non-root username for Gitea
## @param externalDatabase.database Gitea database name
## @param externalDatabase.existingSecret Name of an existing secret resource containing the database credentials
## @param externalDatabase.existingSecretPasswordKey Name of an existing secret key containing the database credentials
##
externalDatabase:
  host: postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local
  port: 5432
  user: {{ .Values.globals.gitea.postgres.username }}
  database: {{ .Values.globals.gitea.postgres.database }}
  password: {{ .Values.globals.gitea.postgres.password }}
  existingSecret: ""
  existingSecretPasswordKey: "db-password"
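# For reference, a sketch of what the block above renders to with the globals
# in values/globals/staging.yaml.gotmpl (username and password come from the
# gitignored secrets file, shown here as placeholders):
# externalDatabase:
#   host: postgres-postgresql.db.svc.cluster.local
#   port: 5432
#   user: <gitea postgres username>
#   database: gitea
#   password: <gitea postgres password>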
## @section Volume Permissions parameters
##

## Init containers parameters:
## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
##
volumePermissions:
  ## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsGroup` values do not work)
  ##
  enabled: false
  ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry
  ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image name
  ## @skip volumePermissions.image.tag Init container volume-permissions image tag
  ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
  ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array
  ##
  image:
    registry: docker.io
    repository: bitnami/os-shell
    tag: 12-debian-12-r35
    digest: ""
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ## e.g:
    ## pullSecrets:
    ##   - myRegistryKeySecretName
    ##
    pullSecrets: []
  ## Init containers' resource requests and limits
  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
  ## We usually recommend not to specify default resources and to leave this as a conscious
  ## choice for the user. This also increases chances charts run on environments with little
  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
  ##
  resourcesPreset: "nano"
  ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
  ## Example:
  ## resources:
  ##   requests:
  ##     cpu: 2
  ##     memory: 512Mi
  ##   limits:
  ##     cpu: 3
  ##     memory: 1024Mi
  ##
  resources: {}
1398
proxmox/k8s/helmfile.d/values/gitlab/values.yaml.gotmpl
Normal file
File diff suppressed because it is too large
1
proxmox/k8s/helmfile.d/values/globals/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
secrets.yaml
216
proxmox/k8s/helmfile.d/values/globals/staging.yaml.gotmpl
Normal file
@@ -0,0 +1,216 @@
{{ $email := "tonydu121@hotmail.com" }}
{{ $domain := "mnke.org" }}
{{ $subdomain := "dolo" }}
{{ $appDomain := print $subdomain "." $domain }}
# This should be an IP in the MetalLB range
{{ $primaryLoadBalancerIP := "10.0.185.128" }}
{{ $environment := "staging" }}
{{ $ingressClass := "traefik" }}
{{ $nfsStorageClass := "nfs-client" }}
{{ $longhornStorageClass := "longhorn" }}

{{
  $ghostDatabase := dict
    "database" "ghost"
    "username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#ghost/mysql/username" )
    "password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#ghost/mysql/password" )
}}
{{
  $mysqlDatabases := list
    $ghostDatabase
}}

{{
  $authentikDatabase := dict
    "database" "authentik"
    "username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#authentik/postgres/username" )
    "password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#authentik/postgres/password" )
}}
{{
  $harborDatabase := dict
    "database" "harborcore"
    "username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/postgres/username" )
    "password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/postgres/password" )
}}
{{
  $giteaDatabase := dict
    "database" "gitea"
    "username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#gitea/postgres/username" )
    "password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#gitea/postgres/password" )
}}
{{
  $postgresDatabases := list
    $authentikDatabase
    $harborDatabase
    $giteaDatabase
}}
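{{/*
The fetchSecretValue refs above assume a vals-compatible secrets file at
values/globals/secrets.yaml (gitignored). A sketch of the expected layout,
with placeholder values; the remaining keys referenced below (cloudflare,
rancher, mysql, postgres, harbor, and so on) follow the same pattern:

ghost:
  mysql:
    username: ghost
    password: <secret>
gitea:
  postgres:
    username: gitea
    password: <secret>
*/}}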

globals:
  email: {{ $email }}
  environment: {{ $environment }}

  certManager:
    namespace: cert-manager

  traefik:
    namespace: traefik
    ingressClass: {{ $ingressClass }}
    loadBalancerIP: {{ $primaryLoadBalancerIP }}

  certs:
    acmeEmail: {{ $email }}
    cloudflareEmail: {{ $email }}
    certIssuerMode: {{ $environment }}

    cloudflareSecretToken: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#cloudflare/secretToken" }}
    cloudflareTokenSecretName: cloudflare-token-secret

    issuerName: letsencrypt
    privateKeySecretRef: letsencrypt

    hlMnkeOrg:
      certificateName: {{ $subdomain }}.{{ $domain }}
      certificateSecretName: {{ $subdomain }}.{{ $domain }}-tls
      certificateNamespace: default
      commonName: "{{ $appDomain }}"
      dnsZones:
        - "{{ $domain }}"
      dnsNames:
        - "{{ $appDomain }}"
        - "*.{{ $appDomain }}"

  longhorn:
    namespace: longhorn-system
    storageClass: {{ $longhornStorageClass }}

  nfsSubdirExternalProvisioner:
    namespace: nfs-subdir-external-provisioner
    replicaCount: 1
    nfs:
      server: truenas.local
      path: /mnt/emc14t9/k8s-pv
    storageClass: {{ $nfsStorageClass }}
    accessModes: ReadWriteMany

  rancher:
    namespace: cattle-system
    ingressClass: {{ $ingressClass }}
    hostname: rancher.{{ $appDomain }}
    replicas: 3
    bootstrapPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#rancher/bootstrapPassword" }}

  uptimeKuma:
    namespace: uptime-kuma
    ingressClass: {{ $ingressClass }}
    hosts:
      - uptime.{{ $appDomain }}
    storageClass: {{ $longhornStorageClass }}

  mysql:
    namespace: db
    storageClass: {{ $longhornStorageClass }}
    username: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#mysql/username" }}
    password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#mysql/password" }}
    rootPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#mysql/rootPassword" }}
    databases:
      {{ $mysqlDatabases | toYaml | nindent 4 }}

  postgres:
    namespace: db
    storageClass: {{ $longhornStorageClass }}
    username: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#postgres/username" }}
    password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#postgres/password" }}
    postgresPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#postgres/postgresPassword" }}
    databases:
      {{ $postgresDatabases | toYaml | nindent 4 }}

  phpmyadmin:
    namespace: db
    hostname: pma.{{ $appDomain }}
    storageClass: {{ $longhornStorageClass }}
    ingressClass: {{ $ingressClass }}

  pgadmin4:
    namespace: db
    hostname: pg.{{ $appDomain }}
    ingressClass: {{ $ingressClass }}
    storageClass: {{ $longhornStorageClass }}
    storageSize: 2Gi
    accessMode: ReadWriteOnce
    # can be email or nickname
    email: tony@mnke.org
    password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#pgadmin4/password" }}

  redis:
    namespace: redis
    storageClass: {{ $longhornStorageClass }}
    storageSize: 8Gi
    accessMode: ReadWriteMany
    password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#redis/password" }}

  ghost:
    namespace: ghost
    primaryHost: blog.mnke.org
    storageClass: {{ $longhornStorageClass }}
    ingressClass: {{ $ingressClass }}
    ghostEmail: {{ $email }}
    ghostPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#ghost/password" }}
    mysql:
      {{ $ghostDatabase | toYaml | nindent 6 }}

  authentik:
    namespace: authentik
    storageClass: {{ $longhornStorageClass }}
    ingressClass: {{ $ingressClass }}
    secretKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#authentik/secretKey" }}
    hostnames:
      - auth.{{ $appDomain }}
      - auth.{{ $domain }}
    postgres:
      {{ $authentikDatabase | toYaml | nindent 6 }}

  harbor:
    namespace: harbor
    hostname: harbor.{{ $appDomain }}
    ingressClass: {{ $ingressClass }}
    storageClass: {{ $nfsStorageClass }}
    username: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/username" }}
    password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/password" }}
    htpasswd: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/htpasswd" }}
    registrySecret: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/registrySecret" }}
    jobserviceSecret: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/jobserviceSecret" }}
    coreSecretKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreSecretKey" }}
    coreSecret: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreSecret" }}
    coreCsrfKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreCsrfKey" }}
    coreTlsKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreTlsKey" | quote }}
    coreTlsCert: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreTlsCert" | quote }}

    postgres:
      {{ $harborDatabase | toYaml | nindent 6 }}

  kubePrometheusStack:
    namespace: kube-prometheus-stack
    ingressClass: {{ $ingressClass }}
    storageClass: {{ $nfsStorageClass }}
    thanosRuler:
      storageSize: 4Gi
    prometheus:
      storageSize: 4Gi
    grafana:
      storageSize: 4Gi
      adminPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#kubePrometheusStack/grafana/adminPassword" }}
      hosts:
        - gf.{{ $appDomain }}

  argocd:
    namespace: argo-cd
    ingressClass: {{ $ingressClass }}
    storageClass: {{ $nfsStorageClass }}
    hostname: argocd.{{ $appDomain }}
    adminPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#argocd/adminPassword" }}

  gitea:
    namespace: gitea
    ingressClass: {{ $ingressClass }}
    postgres:
      {{ $giteaDatabase | toYaml | nindent 6 }}
3815
proxmox/k8s/helmfile.d/values/harbor/values.yaml.gotmpl
Normal file
File diff suppressed because it is too large
34
proxmox/k8s/helmfile.d/values/init-dbs/values.yaml.gotmpl
Normal file
@@ -0,0 +1,34 @@
# This is for the secrets for pulling an image from a private repository. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""

postgres:
  image:
    ref: postgres
    # This sets the pull policy for images.
    pullPolicy: IfNotPresent
  host: postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local
  username: postgres
  password: {{ .Values.globals.postgres.postgresPassword }}
  databases:
    {{- range .Values.globals.postgres.databases }}
    - database: {{ .database }}
      username: {{ .username }}
      password: {{ .password }}
    {{- end }}
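# Rendered sketch for one entry of .Values.globals.postgres.databases (the
# values are placeholders resolved from the globals file):
#   databases:
#     - database: gitea
#       username: <gitea postgres username>
#       password: <gitea postgres password>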
mysql:
  image:
    ref: mysql
    # This sets the pull policy for images.
    pullPolicy: IfNotPresent
  host: mysql.{{ .Values.globals.mysql.namespace }}.svc.cluster.local
  username: root
  password: {{ .Values.globals.mysql.rootPassword }}
  databases:
    {{- range .Values.globals.mysql.databases }}
    - database: {{ .database }}
      username: {{ .username }}
      password: {{ .password }}
    {{- end }}
File diff suppressed because it is too large
539
proxmox/k8s/helmfile.d/values/longhorn/values.yaml.gotmpl
Normal file
@@ -0,0 +1,539 @@
# Default values for longhorn.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
  # -- Toleration for nodes allowed to run user-deployed components such as Longhorn Manager, Longhorn UI, and Longhorn Driver Deployer.
  tolerations: []
  # -- Node selector for nodes allowed to run user-deployed components such as Longhorn Manager, Longhorn UI, and Longhorn Driver Deployer.
  nodeSelector: {}
  cattle:
    # -- Default system registry.
    systemDefaultRegistry: ""
    windowsCluster:
      # -- Setting that allows Longhorn to run on a Rancher Windows cluster.
      enabled: false
      # -- Toleration for Linux nodes that can run user-deployed Longhorn components.
      tolerations:
        - key: "cattle.io/os"
          value: "linux"
          effect: "NoSchedule"
          operator: "Equal"
      # -- Node selector for Linux nodes that can run user-deployed Longhorn components.
      nodeSelector:
        kubernetes.io/os: "linux"
      defaultSetting:
        # -- Toleration for system-managed Longhorn components.
        taintToleration: cattle.io/os=linux:NoSchedule
        # -- Node selector for system-managed Longhorn components.
        systemManagedComponentsNodeSelector: kubernetes.io/os:linux

networkPolicies:
  # -- Setting that allows you to enable network policies that control access to Longhorn pods.
  enabled: false
  # -- Distribution that determines the policy for allowing access for an ingress. (Options: "k3s", "rke2", "rke1")
  type: "k3s"

image:
  longhorn:
    engine:
      # -- Repository for the Longhorn Engine image.
      repository: longhornio/longhorn-engine
      # -- Tag for the Longhorn Engine image.
      tag: v1.8.0
    manager:
      # -- Repository for the Longhorn Manager image.
      repository: longhornio/longhorn-manager
      # -- Tag for the Longhorn Manager image.
      tag: v1.8.0
    ui:
      # -- Repository for the Longhorn UI image.
      repository: longhornio/longhorn-ui
      # -- Tag for the Longhorn UI image.
      tag: v1.8.0
    instanceManager:
      # -- Repository for the Longhorn Instance Manager image.
      repository: longhornio/longhorn-instance-manager
      # -- Tag for the Longhorn Instance Manager image.
      tag: v1.8.0
    shareManager:
      # -- Repository for the Longhorn Share Manager image.
      repository: longhornio/longhorn-share-manager
      # -- Tag for the Longhorn Share Manager image.
      tag: v1.8.0
    backingImageManager:
      # -- Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value.
      repository: longhornio/backing-image-manager
      # -- Tag for the Backing Image Manager image. When unspecified, Longhorn uses the default value.
      tag: v1.8.0
  supportBundleKit:
    # -- Repository for the Longhorn Support Bundle Manager image.
    repository: longhornio/support-bundle-kit
    # -- Tag for the Longhorn Support Bundle Manager image.
    tag: v0.0.49
  csi:
    attacher:
      # -- Repository for the CSI attacher image. When unspecified, Longhorn uses the default value.
      repository: longhornio/csi-attacher
      # -- Tag for the CSI attacher image. When unspecified, Longhorn uses the default value.
      tag: v4.8.0
    provisioner:
      # -- Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
      repository: longhornio/csi-provisioner
      # -- Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
      tag: v5.1.0-20241220
    nodeDriverRegistrar:
      # -- Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
      repository: longhornio/csi-node-driver-registrar
      # -- Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
      tag: v2.13.0
    resizer:
      # -- Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value.
      repository: longhornio/csi-resizer
      # -- Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value.
      tag: v1.13.1
    snapshotter:
      # -- Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
      repository: longhornio/csi-snapshotter
      # -- Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
      tag: v8.2.0
    livenessProbe:
      # -- Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
      repository: longhornio/livenessprobe
      # -- Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
      tag: v2.15.0
  openshift:
    oauthProxy:
      # -- Repository for the OAuth Proxy image. Specify the upstream image (for example, "quay.io/openshift/origin-oauth-proxy"). This setting applies only to OpenShift users.
      repository: ""
      # -- Tag for the OAuth Proxy image. Specify OCP/OKD version 4.1 or later (including version 4.15, which is available at quay.io/openshift/origin-oauth-proxy:4.15). This setting applies only to OpenShift users.
      tag: ""
  # -- Image pull policy that applies to all user-deployed Longhorn components, such as Longhorn Manager, Longhorn driver, and Longhorn UI.
  pullPolicy: IfNotPresent

service:
  ui:
    # -- Service type for Longhorn UI. (Options: "ClusterIP", "NodePort", "LoadBalancer", "Rancher-Proxy")
    type: ClusterIP
    # -- NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767.
    nodePort: null
  manager:
    # -- Service type for Longhorn Manager.
    type: ClusterIP
    # -- NodePort port number for Longhorn Manager. When unspecified, Longhorn selects a free port between 30000 and 32767.
    nodePort: ""

persistence:
  # -- Setting that allows you to specify the default Longhorn StorageClass.
  defaultClass: true
  # -- Filesystem type of the default Longhorn StorageClass.
  defaultFsType: ext4
  # -- mkfs parameters of the default Longhorn StorageClass.
  defaultMkfsParams: ""
  # -- Replica count of the default Longhorn StorageClass.
  defaultClassReplicaCount: 3
  # -- Data locality of the default Longhorn StorageClass. (Options: "disabled", "best-effort")
  defaultDataLocality: disabled
  # -- Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: "Retain", "Delete")
  reclaimPolicy: Delete
  # -- Setting that allows you to enable live migration of a Longhorn volume from one node to another.
  migratable: false
  # -- Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the volume-head-xxx.img file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery.
  disableRevisionCounter: "true"
  # -- Set NFS mount options for Longhorn StorageClass for RWX volumes
  nfsOptions: ""
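  # -- e.g. (illustrative NFS mount options only, not a Longhorn default):
  # nfsOptions: "vers=4.2,noresvport,timeo=600,retrans=5"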
  recurringJobSelector:
    # -- Setting that allows you to enable the recurring job selector for a Longhorn StorageClass.
    enable: false
    # -- Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`)
    jobList: []
  backingImage:
    # -- Setting that allows you to use a backing image in a Longhorn StorageClass.
    enable: false
    # -- Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image.
    name: ~
    # -- Data source type of a backing image used in a Longhorn StorageClass.
    # If the backing image exists in the cluster, Longhorn uses this setting to verify the image.
    # If the backing image does not exist, Longhorn creates one using the specified data source type.
    dataSourceType: ~
    # -- Data source parameters of a backing image used in a Longhorn StorageClass.
    # You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`)
    dataSourceParameters: ~
    # -- Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass.
    expectedChecksum: ~
  defaultDiskSelector:
    # -- Setting that allows you to enable the disk selector for the default Longhorn StorageClass.
    enable: false
    # -- Disk selector for the default Longhorn StorageClass. Longhorn uses only disks with the specified tags for storing volume data. (Examples: "nvme,sata")
    selector: ""
  defaultNodeSelector:
    # -- Setting that allows you to enable the node selector for the default Longhorn StorageClass.
    enable: false
    # -- Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast")
    selector: ""
  # -- Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: "ignored", "enabled", "disabled")
  removeSnapshotsDuringFilesystemTrim: ignored
  # -- Setting that allows you to specify the data engine version for the default Longhorn StorageClass. (Options: "v1", "v2")
  dataEngine: v1
  # -- Setting that allows you to specify the backup target for the default Longhorn StorageClass.
  backupTargetName: default

preUpgradeChecker:
  # -- Setting that allows Longhorn to perform pre-upgrade checks. Disable this setting when installing Longhorn using Argo CD or other GitOps solutions.
  jobEnabled: true
  # -- Setting that allows Longhorn to perform upgrade version checks after starting the Longhorn Manager DaemonSet Pods. Disabling this setting also disables `preUpgradeChecker.jobEnabled`. Longhorn recommends keeping this setting enabled.
  upgradeVersionCheck: true

csi:
  # -- kubelet root directory. When unspecified, Longhorn uses the default value.
  kubeletRootDir: ~
  # -- Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value ("3").
  attacherReplicaCount: ~
  # -- Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value ("3").
  provisionerReplicaCount: ~
  # -- Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value ("3").
  resizerReplicaCount: ~
  # -- Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value ("3").
  snapshotterReplicaCount: ~

defaultSettings:
  # -- Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run.
  allowRecurringJobWhileVolumeDetached: ~
  # -- Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster.
  createDefaultDiskLabeledNodes: ~
  # -- Default path for storing data on a host. The default value is "/var/lib/longhorn/".
  defaultDataPath: ~
  # -- Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume.
  defaultDataLocality: ~
  # -- Setting that allows scheduling on nodes with healthy replicas of the same volume. This setting is disabled by default.
  replicaSoftAntiAffinity: ~
  # -- Setting that automatically rebalances replicas when an available node is discovered.
  replicaAutoBalance: ~
  # -- Percentage of storage that can be allocated relative to hard drive capacity. The default value is "100".
  storageOverProvisioningPercentage: ~
  # -- Percentage of minimum available disk capacity. When the minimum available capacity exceeds the total available capacity, the disk becomes unschedulable until more space is made available for use. The default value is "25".
  storageMinimalAvailablePercentage: ~
  # -- Percentage of disk space that is not allocated to the default disk on each new Longhorn node.
  storageReservedPercentageForDefaultDisk: ~
  # -- Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default.
  upgradeChecker: ~
  # -- Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is "3".
  defaultReplicaCount: ~
  # -- Default name of Longhorn static StorageClass. "storageClassName" is assigned to PVs and PVCs that are created for an existing Longhorn volume. "storageClassName" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. "storageClassName" needs to be an existing StorageClass. The default value is "longhorn-static".
  defaultLonghornStaticStorageClass: ~
  # -- Number of minutes that Longhorn keeps a failed backup resource. When the value is "0", automatic deletion is disabled.
  failedBackupTTL: ~
  # -- Number of minutes that Longhorn allows for the backup execution. The default value is "1".
  backupExecutionTimeout: ~
  # -- Setting that restores recurring jobs from a backup volume on a backup target and creates recurring jobs if none exist during backup restoration.
  restoreVolumeRecurringJobs: ~
  # -- Maximum number of successful recurring backup and snapshot jobs to be retained. When the value is "0", a history of successful recurring jobs is not retained.
  recurringSuccessfulJobsHistoryLimit: ~
  # -- Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained.
  recurringFailedJobsHistoryLimit: ~
  # -- Maximum number of snapshots or backups to be retained.
  recurringJobMaxRetention: ~
  # -- Maximum number of failed support bundles that can exist in the cluster. When the value is "0", Longhorn automatically purges all failed support bundles.
  supportBundleFailedHistoryLimit: ~
  # -- Taint or toleration for system-managed Longhorn components.
  # Specify values using a semicolon-separated list in `kubectl taint` syntax (Example: key1=value1:effect; key2=value2:effect).
  taintToleration: ~
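  # An illustrative value following the syntax above; the "nodetype=storage" taint is hypothetical:
  # taintToleration: "nodetype=storage:NoSchedule"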
  # -- Node selector for system-managed Longhorn components.
  systemManagedComponentsNodeSelector: ~
  # -- PriorityClass for system-managed Longhorn components.
  # This setting can help prevent Longhorn components from being evicted under Node Pressure.
  # Notice that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`.
  priorityClass: &defaultPriorityClassNameRef "longhorn-critical"
  # -- Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default.
  autoSalvage: ~
  # -- Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting.
  autoDeletePodWhenVolumeDetachedUnexpectedly: ~
  # -- Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default.
  disableSchedulingOnCordonedNode: ~
  # -- Setting that allows Longhorn to schedule new replicas of a volume to nodes in the same zone as existing healthy replicas. Nodes that do not belong to any zone are treated as existing in the zone that contains healthy replicas. When identifying zones, Longhorn relies on the label "topology.kubernetes.io/zone=<Zone name of the node>" in the Kubernetes node object.
  replicaZoneSoftAntiAffinity: ~
  # -- Setting that allows scheduling on disks with existing healthy replicas of the same volume. This setting is enabled by default.
  replicaDiskSoftAntiAffinity: ~
  # -- Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed.
  nodeDownPodDeletionPolicy: ~
  # -- Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained.
  nodeDrainPolicy: ~
  # -- Setting that allows automatic detaching of manually-attached volumes when a node is cordoned.
  detachManuallyAttachedVolumesWhenCordoned: ~
  # -- Number of seconds that Longhorn waits before reusing existing data on a failed replica instead of creating a new replica of a degraded volume.
  replicaReplenishmentWaitInterval: ~
  # -- Maximum number of replicas that can be concurrently rebuilt on each node.
  concurrentReplicaRebuildPerNodeLimit: ~
  # -- Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is "0", restoration of volumes using a backup is disabled.
  concurrentVolumeBackupRestorePerNodeLimit: ~
  # -- Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the "volume-head-xxx.img" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI.
  disableRevisionCounter: "true"
  # -- Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart.
  systemManagedPodsImagePullPolicy: ~
  # -- Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation.
  allowVolumeCreationWithDegradedAvailability: ~
  # -- Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed.
  autoCleanupSystemGeneratedSnapshot: ~
  # -- Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job.
  autoCleanupRecurringJobBackupSnapshot: ~
  # -- Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is "0", Longhorn does not automatically upgrade volume engines to the new default engine image version.
  concurrentAutomaticEngineUpgradePerNodeLimit: ~
  # -- Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it.
  backingImageCleanupWaitInterval: ~
  # -- Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to "failed" or "unknown".
  backingImageRecoveryWaitInterval: ~
  # -- Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is "12".
  guaranteedInstanceManagerCPU: ~
  # -- Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler.
  kubernetesClusterAutoscalerEnabled: ~
  # -- Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up.
  orphanAutoDeletion: ~
  # -- Storage network for in-cluster traffic. When unspecified, Longhorn uses the Kubernetes cluster network.
  storageNetwork: ~
  # -- Flag that prevents accidental uninstallation of Longhorn.
  deletingConfirmationFlag: ~
  # -- Timeout between the Longhorn Engine and replicas. Specify a value between "8" and "30" seconds. The default value is "8".
  engineReplicaTimeout: ~
  # -- Setting that allows you to enable and disable snapshot hashing and data integrity checks.
  snapshotDataIntegrity: ~
  # -- Setting that allows disabling of snapshot hashing after snapshot creation to minimize impact on system performance.
  snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~
  # -- Setting that defines when Longhorn checks the integrity of data in snapshot disk files. You must use the Unix cron expression format.
  snapshotDataIntegrityCronjob: ~
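  # An illustrative schedule (weekly, Sunday at midnight); not a chart default:
  # snapshotDataIntegrityCronjob: "0 0 * * 0"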
  # -- Setting that allows Longhorn to automatically mark the latest snapshot and its parent files as removed during a filesystem trim. Longhorn does not remove snapshots containing multiple child files.
  removeSnapshotsDuringFilesystemTrim: ~
  # -- Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to "enable" or "fast-check".
  fastReplicaRebuildEnabled: ~
  # -- Number of seconds that an HTTP client waits for a response from a File Sync server before considering the connection to have failed.
  replicaFileSyncHttpClientTimeout: ~
  # -- Number of seconds that Longhorn allows for the completion of replica rebuilding and snapshot cloning operations.
  longGRPCTimeOut: ~
  # -- Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace")
  logLevel: ~
  # -- Setting that allows you to specify a backup compression method.
  backupCompressionMethod: ~
  # -- Maximum number of worker threads that can concurrently run for each backup.
  backupConcurrentLimit: ~
  # -- Maximum number of worker threads that can concurrently run for each restore operation.
  restoreConcurrentLimit: ~
  # -- Setting that allows you to enable the V1 Data Engine.
  v1DataEngine: ~
  # -- Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is an experimental feature and should not be used in production environments.
  v2DataEngine: ~
  # -- Setting that allows you to configure the maximum huge page size (in MiB) for the V2 Data Engine.
  v2DataEngineHugepageLimit: ~
  # -- Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250".
  v2DataEngineGuaranteedInstanceManagerCPU: ~
  # -- CPU cores on which the Storage Performance Development Kit (SPDK) target daemon should run. The SPDK target daemon is located in each Instance Manager pod. Ensure that the number of cores is less than or equal to the guaranteed Instance Manager CPUs for the V2 Data Engine. The default value is "0x1".
  v2DataEngineCPUMask: ~
  # -- Setting that allows scheduling of empty node selector volumes to any node.
  allowEmptyNodeSelectorVolume: ~
  # -- Setting that allows scheduling of empty disk selector volumes to any disk.
  allowEmptyDiskSelectorVolume: ~
  # -- Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses.
  allowCollectingLonghornUsageMetrics: ~
  # -- Setting that temporarily prevents all attempts to purge volume snapshots.
  disableSnapshotPurge: ~
  # -- Maximum snapshot count for a volume. The value should be between 2 and 250.
  snapshotMaxCount: ~
  # -- Setting that allows you to configure the log level of the SPDK target daemon (spdk_tgt) of the V2 Data Engine.
  v2DataEngineLogLevel: ~
  # -- Setting that allows you to configure the log flags of the SPDK target daemon (spdk_tgt) of the V2 Data Engine.
  v2DataEngineLogFlags: ~
  # -- Setting that freezes the filesystem on the root partition before a snapshot is created.
  freezeFilesystemForSnapshot: ~
  # -- Setting that automatically cleans up the snapshot when the backup is deleted.
  autoCleanupSnapshotWhenDeleteBackup: ~
  # -- Setting that allows Longhorn to detect node failure and immediately migrate affected RWX volumes.
  rwxVolumeFastFailover: ~

# -- Setting that allows you to update the default backupstore.
defaultBackupStore:
  # -- Endpoint used to access the default backupstore. (Options: "NFS", "CIFS", "AWS", "GCP", "AZURE")
  backupTarget: ~
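  # Illustrative endpoint formats; the host, export path, bucket, and region below are assumptions:
  # backupTarget: "nfs://backup.example.lan:/mnt/backups"
  # backupTarget: "s3://my-longhorn-backups@us-east-1/"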
  # -- Name of the Kubernetes secret associated with the default backup target.
  backupTargetCredentialSecret: ~
  # -- Number of seconds that Longhorn waits before checking the default backupstore for new backups. The default value is "300". When the value is "0", polling is disabled.
  pollInterval: ~

privateRegistry:
  # -- Setting that allows you to create a private registry secret.
  createSecret: ~
  # -- URL of a private registry. When unspecified, Longhorn uses the default system registry.
  registryUrl: ~
  # -- User account used for authenticating with a private registry.
  registryUser: ~
  # -- Password for authenticating with a private registry.
  registryPasswd: ~
  # -- Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name.
  registrySecret: ~

longhornManager:
  log:
    # -- Format of Longhorn Manager logs. (Options: "plain", "json")
    format: plain
  # -- PriorityClass for Longhorn Manager.
  priorityClass: *defaultPriorityClassNameRef
  # -- Toleration for Longhorn Manager on nodes allowed to run Longhorn components.
  tolerations: []
  ## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  # -- Node selector for Longhorn Manager. Specify the nodes allowed to run Longhorn Manager.
  nodeSelector: {}
  ## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
  ## and uncomment this example block
  # label-key1: "label-value1"
  # label-key2: "label-value2"
  # -- Annotation for the Longhorn Manager service.
  serviceAnnotations: {}
  ## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
  ## and uncomment this example block
  # annotation-key1: "annotation-value1"
  # annotation-key2: "annotation-value2"

longhornDriver:
  log:
    # -- Format of longhorn-driver logs. (Options: "plain", "json")
    format: plain
  # -- PriorityClass for Longhorn Driver.
  priorityClass: *defaultPriorityClassNameRef
  # -- Toleration for Longhorn Driver on nodes allowed to run Longhorn components.
  tolerations: []
  ## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  # -- Node selector for Longhorn Driver. Specify the nodes allowed to run Longhorn Driver.
  nodeSelector: {}
  ## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
  ## and uncomment this example block
  # label-key1: "label-value1"
  # label-key2: "label-value2"

longhornUI:
  # -- Replica count for Longhorn UI.
  replicas: 2
  # -- PriorityClass for Longhorn UI.
  priorityClass: *defaultPriorityClassNameRef
  # -- Toleration for Longhorn UI on nodes allowed to run Longhorn components.
  tolerations: []
  ## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  # -- Node selector for Longhorn UI. Specify the nodes allowed to run Longhorn UI.
  nodeSelector: {}
  ## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
  ## and uncomment this example block
  # label-key1: "label-value1"
  # label-key2: "label-value2"

ingress:
  # -- Setting that allows Longhorn to generate ingress records for the Longhorn UI service.
  enabled: false

  # -- IngressClass resource that contains ingress configuration, including the name of the Ingress controller.
  # ingressClassName can replace the kubernetes.io/ingress.class annotation used in earlier Kubernetes releases.
  ingressClassName: ~

  # -- Hostname of the Layer 7 load balancer.
  host: sslip.io

  # -- Setting that allows you to enable TLS on ingress records.
  tls: false

  # -- Setting that allows you to enable secure connections to the Longhorn UI service via port 443.
  secureBackends: false

  # -- TLS secret that contains the private key and certificate to be used for TLS. This setting applies only when TLS is enabled on ingress records.
  tlsSecret: longhorn.local-tls
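  ## A minimal out-of-band sketch for creating that secret (the certificate file names are assumptions):
  ## kubectl -n longhorn-system create secret tls longhorn.local-tls \
  ##   --cert=tls.crt --key=tls.key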
  # -- Default ingress path. You can access the Longhorn UI by following the full ingress path \{\{host\}\}+\{\{path\}\}.
  path: /

  # -- Ingress path type. To maintain backward compatibility, the default value is "ImplementationSpecific".
  pathType: ImplementationSpecific

  ## If you're using kube-lego, you will want to add:
  ## kubernetes.io/tls-acme: true
  ##
  ## For a full list of possible ingress annotations, please see
  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
  ##
  ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
  # -- Ingress annotations in the form of key-value pairs.
  annotations:
  #  kubernetes.io/ingress.class: nginx
  #  kubernetes.io/tls-acme: true

  # -- Secret that contains a TLS private key and certificate. Use secrets if you want to use your own certificates to secure ingresses.
  secrets:
  ## If you're providing your own certificates, please use this to add the certificates as secrets
  ## key and certificate should start with -----BEGIN CERTIFICATE----- or
  ## REDACTED
  ##
  ## name should line up with a tlsSecret set further up
  ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
  ##
  ## It is also possible to create and manage the certificates outside of this helm chart
  ## Please see README.md for more information
  # - name: longhorn.local-tls
  #   key:
  #   certificate:

# -- Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled.
enablePSP: false

# -- Specify an override namespace. This is useful when using Longhorn as a sub-chart and its release namespace is not `longhorn-system`.
namespaceOverride: ""

# -- Annotation for the Longhorn Manager DaemonSet pods. This setting is optional.
annotations: {}

serviceAccount:
  # -- Annotations to add to the service account
  annotations: {}

metrics:
  serviceMonitor:
    # -- Setting that allows the creation of a Prometheus ServiceMonitor resource for Longhorn Manager components.
    enabled: false
    # -- Additional labels for the Prometheus ServiceMonitor resource.
    additionalLabels: {}
    # -- Annotations for the Prometheus ServiceMonitor resource.
    annotations: {}
    # -- Interval at which Prometheus scrapes the metrics from the target.
    interval: ""
    # -- Timeout after which Prometheus considers the scrape to be failed.
    scrapeTimeout: ""
    # -- Configures the relabeling rules to apply the target’s metadata labels. See the [Prometheus Operator
    # documentation](https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.Endpoint) for
    # formatting details.
    relabelings: []
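    ## A hedged sketch of a single relabeling rule (the target label name "node" is a choice, not a default):
    # relabelings:
    #   - sourceLabels: [__meta_kubernetes_pod_node_name]
    #     targetLabel: node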
    # -- Configures the relabeling rules to apply to the samples before ingestion. See the [Prometheus Operator
    # documentation](https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.Endpoint) for
    # formatting details.
    metricRelabelings: []

## openshift settings
openshift:
  # -- Setting that allows Longhorn to integrate with OpenShift.
  enabled: false
  ui:
    # -- Route for connections between Longhorn and the OpenShift web console.
    route: "longhorn-ui"
    # -- Port for accessing the OpenShift web console.
    port: 443
    # -- Port for proxy that provides access to the OpenShift web console.
    proxy: 8443

# -- Setting that allows Longhorn to generate code coverage profiles.
enableGoCoverDir: false
proxmox/k8s/helmfile.d/values/mysql/values.yaml.gotmpl (new file, 1614 lines; diff suppressed because it is too large)

proxmox/k8s/helmfile.d/values/nfs-subdir-external-provisioner/values.yaml.gotmpl (new file, 115 lines)
@@ -0,0 +1,115 @@
replicaCount: {{ .Values.globals.nfsSubdirExternalProvisioner.replicaCount }}
strategyType: Recreate

image:
  repository: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner
  tag: v4.0.2
  pullPolicy: IfNotPresent
imagePullSecrets: []

nfs:
  server: {{ .Values.globals.nfsSubdirExternalProvisioner.nfs.server }}
  path: {{ .Values.globals.nfsSubdirExternalProvisioner.nfs.path }}
  mountOptions:
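    # Illustrative options (assumptions, not chart defaults); uncomment to use:
    # - nfsvers=4.1
    # - noatime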
  volumeName: nfs-subdir-external-provisioner-root
  # Reclaim policy for the main nfs volume
  reclaimPolicy: Retain

# For creating the StorageClass automatically:
storageClass:
  create: true

  # Set a provisioner name. If unset, a name will be generated.
  # provisionerName:

  # Set StorageClass as the default StorageClass
  # Ignored if storageClass.create is false
  defaultClass: true

  # Set a StorageClass name
  # Ignored if storageClass.create is false
  name: {{ .Values.globals.nfsSubdirExternalProvisioner.storageClass }}

  # Allow volume to be expanded dynamically
  allowVolumeExpansion: true

  # Method used to reclaim an obsoleted volume
  reclaimPolicy: Delete

  # When set to false, your PVs will not be archived by the provisioner upon deletion of the PVC.
  archiveOnDelete: true

  # If it exists and has the value 'delete', delete the directory. If it exists and has the value 'retain', save the directory.
  # Overrides archiveOnDelete.
  # Ignored if value not set.
  onDelete:

  # Specifies a template for creating a directory path via PVC metadata such as labels, annotations, name or namespace.
  # Ignored if value not set.
  pathPattern:
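  # A hedged example; the exact template variables supported are documented in the chart README:
  # pathPattern: "${.PVC.namespace}/${.PVC.name}"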
  # Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany
  accessModes: {{ .Values.globals.nfsSubdirExternalProvisioner.accessModes }}

  # Set volume binding mode - Immediate or WaitForFirstConsumer
  volumeBindingMode: Immediate

  # Storage class annotations
  annotations: {}

leaderElection:
  # When set to false, leader election will be disabled
  enabled: true

## For RBAC support:
rbac:
  # Specifies whether RBAC resources should be created
  create: true

# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
  enabled: false

# Deployment pod annotations
podAnnotations: {}

## Set pod priorityClassName
# priorityClassName: ""

podSecurityContext: {}

securityContext: {}

serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true

  # Annotations to add to the service account
  annotations: {}

  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:

resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

# Additional labels for any resource created
labels: {}

podDisruptionBudget:
  enabled: false
  maxUnavailable: 1
proxmox/k8s/helmfile.d/values/pgadmin4/values.yaml.gotmpl (new file, 420 lines)
@@ -0,0 +1,420 @@
# Default values for pgAdmin4.

replicaCount: 1

## pgAdmin4 container image
##
image:
  registry: docker.io
  repository: dpage/pgadmin4
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""
  pullPolicy: IfNotPresent

## Deployment annotations
annotations: {}

## commonLabels Add labels to all the deployed resources
commonLabels: {}

## priorityClassName
priorityClassName: ""

## Deployment entrypoint override
## Useful when there's a requirement to modify container's default:
## https://www.vaultproject.io/docs/platform/k8s/injector/examples#environment-variable-example
## ref: https://github.com/postgres/pgadmin4/blob/master/Dockerfile#L206
# command: "['/bin/sh', '-c', 'source /vault/secrets/config && <entrypoint script>']"

service:
  type: ClusterIP
  clusterIP: ""
  loadBalancerIP: ""
  port: 80
  targetPort: 80
  # targetPort: 4181 To be used with a proxy extraContainer
  portName: http

  annotations: {}
  ## Special annotations at the service level, e.g.
  ## this will set vnet internal IPs rather than public IPs:
  ## service.beta.kubernetes.io/azure-load-balancer-internal: "true"

  ## Specify the nodePort value for the service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:

## Pod Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  # Specifies whether a service account should be created
  create: false
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""
  # Opt out of API credential automounting.
  # If you don't want the kubelet to automatically mount a ServiceAccount's API credentials,
  # you can opt out of the default behavior
  automountServiceAccountToken: false

## Pod HostAliases
## ref: https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/
##
hostAliases:
  # - ip: "127.0.0.1"
  #   hostnames:
  #     - "pgadmin4.local"

## Strategy used to replace old Pods by new ones
## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
##
strategy: {}
  # type: RollingUpdate
  # rollingUpdate:
  #   maxSurge: 0
  #   maxUnavailable: 1

## Server definitions will be loaded at launch time. This allows connection
## information to be pre-loaded into the instance of pgAdmin4 in the container.
## Note that server definitions are only loaded on first launch,
## i.e. when the configuration database is created, and not on subsequent launches using the same configuration database.
## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/import_export_servers.html
##
serverDefinitions:
  ## If true, server definitions will be created
  ##
  enabled: true

  ## The resource type to use for deploying server definitions.
  ## Can either be ConfigMap or Secret
  resourceType: ConfigMap

  # If resource type is set to ConfigMap, specify existingConfigmap containing definitions
  existingConfigmap: ""

  # If resource type is set to Secret, specify existingSecret containing definitions
  existingSecret: ""

  servers:
    postgres:
      Name: "main"
      Group: "Servers"
      Port: 5432
      Username: "postgres"
      Host: "postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local"
      SSLMode: "prefer"
      MaintenanceDB: "postgres"

networkPolicy:
  enabled: true

## Ingress
## Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
  enabled: true
  annotations:
    kubernetes.io/ingress.class: {{ .Values.globals.pgadmin4.ingressClass }}
    cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
  ingressClassName: {{ .Values.globals.pgadmin4.ingressClass }}
  hosts:
    - host: {{ .Values.globals.pgadmin4.hostname }}
      paths:
        - path: /
          pathType: Prefix
  tls:
    - secretName: pg-hl-mnke-org-tls
      hosts:
        - {{ .Values.globals.pgadmin4.hostname }}

# Additional config maps to be mounted inside a container
# Can be used to map config maps for sidecar as well
extraConfigmapMounts: []
  # - name: certs-configmap
  #   mountPath: /etc/ssl/certs
  #   subPath: ""
  #   configMap: certs-configmap
  #   readOnly: true

extraSecretMounts: []
  # - name: pgpassfile
  #   secret: pgpassfile
  #   subPath: pgpassfile
  #   mountPath: "/var/lib/pgadmin/storage/pgadmin/file.pgpass"
  #   readOnly: true

## Additional volumes to be mounted inside a container
##
extraVolumeMounts: []

## Specify additional containers in extraContainers.
## For example, to add an authentication proxy to a pgadmin4 pod.
extraContainers: |
  # - name: proxy
  #   image: quay.io/gambol99/keycloak-proxy:latest
  #   args:
  #     - -provider=github
  #     - -client-id=
  #     - -client-secret=
  #     - -github-org=<ORG_NAME>
  #     - -email-domain=*
  #     - -cookie-secret=
  #     - -http-address=http://0.0.0.0:4181
  #     - -upstream-url=http://127.0.0.1:3000
  #   ports:
  #     - name: proxy-web
  #       containerPort: 4181

## @param existingSecret Name of existing secret to use for default pgadmin credentials. `env.password` will be ignored and picked up from this secret.
##
existingSecret: ""
## @param secretKeys.pgadminPasswordKey Name of key in existing secret to use for default pgadmin credentials. Only used when `existingSecret` is set.
##
secretKeys:
  pgadminPasswordKey: password
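## A minimal sketch for wiring this up (the secret name "pgadmin-credentials" is an assumption):
## kubectl create secret generic pgadmin-credentials \
##   --from-literal=password='<strong-password>'
## then set existingSecret: "pgadmin-credentials" above.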
## pgAdmin4 startup configuration
## Values in here get injected as environment variables
## A chart reinstall is needed to apply changes
env:
  # can be email or nickname
  email: {{ .Values.globals.pgadmin4.email }}
  password: {{ .Values.globals.pgadmin4.password }}
  # pgpassfile: /var/lib/pgadmin/storage/pgadmin/file.pgpass

  # set context path for application (e.g. /pgadmin4/*)
  # contextPath: /pgadmin4

  ## If True, allows pgAdmin4 to create session cookies based on IP address
  ## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html
  ##
  enhanced_cookie_protection: "False"

  ## Add custom environment variables that will be injected to deployment
  ## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/container_deployment.html
  ##
  variables: []
  # - name: PGADMIN_LISTEN_ADDRESS
  #   value: "0.0.0.0"
  # - name: PGADMIN_LISTEN_PORT
  #   value: "8080"

## Additional environment variables from ConfigMaps
envVarsFromConfigMaps: []
  # - array-of
  # - config-map-names

## Additional environment variables from Secrets
envVarsFromSecrets: []
  # - array-of
  # - secret-names

## Additional environment variables
envVarsExtra: []
  # - name: POSTGRES_USERNAME
  #   valueFrom:
  #     secretKeyRef:
  #       name: pgadmin.pgadmin-db.credentials.postgresql.acid.zalan.do
  #       key: username
  # - name: POSTGRES_PASSWORD
  #   valueFrom:
  #     secretKeyRef:
  #       name: pgadmin.pgadmin-db.credentials.postgresql.acid.zalan.do
  #       key: password

persistentVolume:
  ## If true, pgAdmin4 will create/use a Persistent Volume Claim
  ## If false, use emptyDir
  ##
  enabled: true

  ## pgAdmin4 Persistent Volume Claim annotations
  ##
  annotations: {}

  ## pgAdmin4 Persistent Volume access modes
  ## Must match those of existing PV or dynamic provisioner
  ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
  accessModes:
    - {{ .Values.globals.pgadmin4.accessMode }}

  ## pgAdmin4 Persistent Volume Size
  ##
  size: {{ .Values.globals.pgadmin4.storageSize }}

  ## pgAdmin4 Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner. (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack)
  ##
  storageClass: {{ .Values.globals.pgadmin4.storageClass }}
  # existingClaim: ""

  ## Subdirectory of pgAdmin4 Persistent Volume to mount
  ## Useful if the volume's root directory is not empty
  ##
  subPath: ""

## Additional volumes to be added to the deployment
##
extraVolumes: []

## Security context to be added to pgAdmin4 pods
##
securityContext:
  runAsUser: 5050
  runAsGroup: 5050
  fsGroup: 5050

containerSecurityContext:
  enabled: false
  allowPrivilegeEscalation: false

## pgAdmin4 readiness and liveness probe initial delay and timeout
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
##
livenessProbe:
  initialDelaySeconds: 30
  periodSeconds: 60
  timeoutSeconds: 15
  successThreshold: 1
  failureThreshold: 3

readinessProbe:
  initialDelaySeconds: 30
  periodSeconds: 60
  timeoutSeconds: 15
  successThreshold: 1
  failureThreshold: 3

## Required for pgAdmin4 releases prior to 4.16, to set the ACL on /var/lib/pgadmin.
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
##
VolumePermissions:
  ## If true, enables an InitContainer to set permissions on /var/lib/pgadmin.
  ##
  enabled: false

## @param extraDeploy list of extra manifests to deploy
##
extraDeploy: []

## Additional InitContainers to initialize the pod
##
extraInitContainers: |
  # - name: add-folder-for-pgpass
  #   image: "dpage/pgadmin4:latest"
  #   command: ["/bin/mkdir", "-p", "/var/lib/pgadmin/storage/pgadmin"]
  #   volumeMounts:
  #     - name: pgadmin-data
  #       mountPath: /var/lib/pgadmin
  #   securityContext:
  #     runAsUser: 5050

containerPorts:
  http: 80

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

## Horizontal Pod Autoscaling
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
#
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

## Node labels for pgAdmin4 pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}

## Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations: []

## Pod affinity
##
affinity: {}

## Pod DNS Policy
## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy

dnsPolicy: ""

## Update pod DNS Config
## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config

dnsConfig: {}
  # nameservers:
  #   - 192.0.2.1
  # searches:
  #   - ns1.svc.cluster-domain.example
  #   - my.dns.search.suffix
  # options:
  #   - name: ndots
  #     value: "2"
  #   - name: edns0

## Pod annotations
##
podAnnotations: {}
templatedPodAnnotations: |-
  # checksum/configmap-oauth2: "{{ `{{ include "<parent-chart-name>/templates/configmap-oauth2.yaml" $ | sha256sum }}` }}"
  # checksum/secret-oauth2: "{{ `{{ include "<parent-chart-name>/templates/secret-oauth2.yaml" $ | sha256sum }}` }}"
  # checksum/secret-pgpass: "{{ `{{ include "<parent-chart-name>/templates/secret-pgpass.yaml" $ | sha256sum }}` }}"

## Pod labels
##
podLabels: {}
  # key1: value1
  # key2: value2

# -- The name of the Namespace to deploy
# If not set, `.Release.Namespace` is used
namespace: null

init:
  ## Init container resources
  ##
  resources: {}

## Define values for chart tests
test:
  ## Container image for test-connection.yaml
  image:
    registry: docker.io
    repository: busybox
    tag: latest
  ## Resources request/limit for test-connection Pod
  resources: {}
    # limits:
    #   cpu: 50m
    #   memory: 32Mi
    # requests:
    #   cpu: 25m
    #   memory: 16Mi
  ## Security context for test-connection Pod
  securityContext:
    runAsUser: 5051
    runAsGroup: 5051
    fsGroup: 5051
proxmox/k8s/helmfile.d/values/phpmyadmin/values.yaml.gotmpl (new file, 811 lines)
@@ -0,0 +1,811 @@
# Copyright Broadcom, Inc. All Rights Reserved.
# SPDX-License-Identifier: APACHE-2.0

## @section Global parameters
## Global Docker image parameters
## Please note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass

## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
##
global:
  imageRegistry: ""
  ## E.g.
  ## imagePullSecrets:
  ##   - myRegistryKeySecretName
  ##
  imagePullSecrets: []
  defaultStorageClass: {{ .Values.globals.phpmyadmin.storageClass }}
  ## Security parameters
  ##
  security:
    ## @param global.security.allowInsecureImages Allows skipping image verification
    allowInsecureImages: false
  ## Compatibility adaptations for Kubernetes platforms
  ##
  compatibility:
    ## Compatibility adaptations for Openshift
    ##
    openshift:
      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
      ##
      adaptSecurityContext: disabled
## @section Common parameters

## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
##
kubeVersion: ""
## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname template
##
fullnameOverride: ""
## @param commonLabels Add labels to all the deployed resources
##
commonLabels: {}
## @param commonAnnotations Add annotations to all the deployed resources
##
commonAnnotations: {}
## @param clusterDomain Kubernetes Cluster Domain
##
clusterDomain: cluster.local
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## @section phpMyAdmin parameters

## Bitnami phpMyAdmin image version
## ref: https://hub.docker.com/r/bitnami/phpmyadmin/tags/
## @param image.registry [default: REGISTRY_NAME] phpMyAdmin image registry
## @param image.repository [default: REPOSITORY_NAME/phpmyadmin] phpMyAdmin image repository
## @skip image.tag phpMyAdmin image tag (immutable tags are recommended)
## @param image.digest phpMyAdmin image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param image.pullPolicy Image pull policy
## @param image.pullSecrets Specify docker-registry secret names as an array
## @param image.debug Enable phpmyadmin image debug mode
##
image:
  registry: docker.io
  repository: bitnami/phpmyadmin
  tag: 5.2.2-debian-12-r0
  digest: ""
  ## Specify an imagePullPolicy
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## Example:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []
  ## Enable debug mode
  ##
  debug: false
## @param command Override default container command (useful when using custom images)
##
command: []
## @param args Override default container args (useful when using custom images)
##
args: []
## @param lifecycleHooks for the phpmyadmin container(s) to automate configuration before or after startup
##
lifecycleHooks: {}
## @param extraEnvVars Extra environment variables to be set on PhpMyAdmin container
## For example:
## extraEnvVars:
##   - name: PHP_UPLOAD_MAX_FILESIZE
##     value: "80M"
##
extraEnvVars: []
## @param extraEnvVarsCM Name of an existing ConfigMap containing extra env vars
##
extraEnvVarsCM: ""
## @param extraEnvVarsSecret Name of an existing Secret containing extra env vars
##
extraEnvVarsSecret: ""
## @section phpMyAdmin deployment parameters

## @param automountServiceAccountToken Mount Service Account token in pod
##
automountServiceAccountToken: false
## @param hostAliases [array] Deployment pod host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases:
  ## Necessary for apache-exporter to work
  ##
  - ip: "127.0.0.1"
    hostnames:
      - "status.localhost"
## phpMyAdmin container ports to open
## @param containerPorts.http HTTP port to expose at container level
## @param containerPorts.https HTTPS port to expose at container level
##
containerPorts:
  http: 8080
  https: 8443
## @param extraContainerPorts Optionally specify extra list of additional ports for phpMyAdmin container(s)
## e.g:
## extraContainerPorts:
##   - name: myservice
##     containerPort: 9090
##
extraContainerPorts: []
## @param updateStrategy.type Strategy to use to update Pods
##
updateStrategy:
  ## StrategyType
  ## Can be set to RollingUpdate or OnDelete
  ##
  type: RollingUpdate
## phpMyAdmin pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param podSecurityContext.enabled Enable phpMyAdmin pods' Security Context
## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param podSecurityContext.supplementalGroups Set filesystem extra groups
## @param podSecurityContext.fsGroup User ID for the container
##
podSecurityContext:
  enabled: true
  fsGroupChangePolicy: Always
  sysctls: []
  supplementalGroups: []
  fsGroup: 1001
## phpMyAdmin containers' Security Context (only main container)
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param containerSecurityContext.enabled Enabled containers' Security Context
## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param containerSecurityContext.privileged Set container's Security Context privileged
## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
##
containerSecurityContext:
  enabled: true
  seLinuxOptions: {}
  runAsUser: 1001
  runAsGroup: 1001
  runAsNonRoot: true
  privileged: false
  readOnlyRootFilesystem: true
  allowPrivilegeEscalation: false
  capabilities:
    drop: ["ALL"]
  seccompProfile:
    type: "RuntimeDefault"
## phpMyAdmin number of pod replicas
## @param replicas Number of replicas
replicas: 1
## phpMyAdmin containers' resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "micro"
## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
##   requests:
##     cpu: 2
##     memory: 512Mi
##   limits:
##     cpu: 3
##     memory: 1024Mi
##
resources: {}
## phpMyAdmin containers' startup probe. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
## @param startupProbe.enabled Enable startupProbe
## @param startupProbe.httpGet.path Request path for startupProbe
## @param startupProbe.httpGet.port Port for startupProbe
## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param startupProbe.periodSeconds Period seconds for startupProbe
## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param startupProbe.failureThreshold Failure threshold for startupProbe
## @param startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
  enabled: false
  initialDelaySeconds: 30
  timeoutSeconds: 30
  periodSeconds: 10
  successThreshold: 1
  failureThreshold: 6
  httpGet:
    path: /
    port: http
## phpMyAdmin containers' liveness probe. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
## @param livenessProbe.enabled Enable livenessProbe
## @param livenessProbe.tcpSocket.port Port for livenessProbe
## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param livenessProbe.periodSeconds Period seconds for livenessProbe
## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
  enabled: true
  initialDelaySeconds: 30
  timeoutSeconds: 30
  periodSeconds: 10
  successThreshold: 1
  failureThreshold: 6
  tcpSocket:
    port: http
## phpMyAdmin containers' readiness probes. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
## @param readinessProbe.enabled Enable readinessProbe
## @param readinessProbe.httpGet.path Request path for readinessProbe
## @param readinessProbe.httpGet.port Port for readinessProbe
## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param readinessProbe.periodSeconds Period seconds for readinessProbe
## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
  enabled: true
  initialDelaySeconds: 30
  timeoutSeconds: 30
  periodSeconds: 10
  successThreshold: 1
  failureThreshold: 6
  httpGet:
    path: /
    port: http
## @param customStartupProbe Override default startup probe
##
customStartupProbe: {}
## @param customLivenessProbe Override default liveness probe
##
customLivenessProbe: {}
## @param customReadinessProbe Override default readiness probe
##
customReadinessProbe: {}
## @param podLabels Extra labels for PhpMyAdmin pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @param podAnnotations Annotations for PhpMyAdmin pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
  ##
  type: ""
  ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
  ## E.g.
  ## key: "kubernetes.io/e2e-az-name"
  ##
  key: ""
  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
  ## E.g.
  ## values:
  ##   - e2e-az1
  ##   - e2e-az2
  ##
  values: []
## @param affinity Affinity for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param nodeSelector Node labels for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param tolerations Tolerations for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param priorityClassName phpmyadmin pods' priorityClassName
##
priorityClassName: ""
## @param schedulerName Name of the k8s scheduler (other than default)
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param topologySpreadConstraints Topology Spread Constraints for pod assignment
## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
## The value is evaluated as a template
##
topologySpreadConstraints: []
|
||||
## @param extraVolumes Optionally specify extra list of additional volumes for PhpMyAdmin pods
|
||||
##
|
||||
extraVolumes: []
|
||||
## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for PhpMyAdmin container(s)
|
||||
##
|
||||
extraVolumeMounts: []
|
||||
## @param initContainers Add init containers to the PhpMyAdmin pods
|
||||
## Example:
|
||||
## initContainers:
|
||||
## - name: your-image-name
|
||||
## image: your-image
|
||||
## imagePullPolicy: Always
|
||||
## ports:
|
||||
## - name: portname
|
||||
## containerPort: 1234
|
||||
##
|
||||
initContainers: []
|
||||
## Pod Disruption Budget configuration
|
||||
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
|
||||
## @param pdb.create Enable/disable a Pod Disruption Budget creation
|
||||
## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
|
||||
## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty.
|
||||
##
|
||||
pdb:
|
||||
create: true
|
||||
minAvailable: ""
|
||||
maxUnavailable: ""
|
||||
## @param sidecars Add sidecar containers to the PhpMyAdmin pods
|
||||
## Example:
|
||||
## sidecars:
|
||||
## - name: your-image-name
|
||||
## image: your-image
|
||||
## imagePullPolicy: Always
|
||||
## ports:
|
||||
## - name: portname
|
||||
## containerPort: 1234
|
||||
##
|
||||
sidecars: []
|
||||
## @section Traffic Exposure parameters
|
||||
|
||||
## Service configuration
|
||||
##
|
||||
service:
|
||||
## @param service.type Kubernetes Service type
|
||||
##
|
||||
type: ClusterIP
|
||||
## @param service.ports.http Service HTTP port
|
||||
## @param service.ports.https Service HTTPS port
|
||||
##
|
||||
ports:
|
||||
http: 80
|
||||
https: 443
|
||||
## Specify the nodePort values for the LoadBalancer and NodePort service types
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
||||
## @param service.nodePorts.http Kubernetes http node port
|
||||
## @param service.nodePorts.https Kubernetes https node port
|
||||
##
|
||||
nodePorts:
|
||||
http: ""
|
||||
https: ""
|
||||
## @param service.clusterIP PhpMyAdmin service clusterIP IP
|
||||
## e.g:
|
||||
## clusterIP: None
|
||||
##
|
||||
clusterIP: ""
|
||||
## @param service.loadBalancerIP Load balancer IP for the phpMyAdmin Service (optional, cloud specific)
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
|
||||
##
|
||||
loadBalancerIP: ""
|
||||
## @param service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
|
||||
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
|
||||
## Example:
|
||||
## loadBalancerSourceRanges:
|
||||
## - 10.10.10.0/24
|
||||
##
|
||||
loadBalancerSourceRanges: []
|
||||
## @param service.externalTrafficPolicy Enable client source IP preservation
|
||||
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
|
||||
##
|
||||
externalTrafficPolicy: Cluster
|
||||
## @param service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
|
||||
##
|
||||
extraPorts: []
|
||||
## @param service.annotations Provide any additional annotations that may be required for the PhpMyAdmin service
|
||||
##
|
||||
annotations: {}
|
||||
## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
|
||||
## If "ClientIP", consecutive client requests will be directed to the same Pod
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
|
||||
##
|
||||
sessionAffinity: None
|
||||
## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
|
||||
## sessionAffinityConfig:
|
||||
## clientIP:
|
||||
## timeoutSeconds: 300
|
||||
##
|
||||
sessionAffinityConfig: {}
|
||||
## Ingress configuration
|
||||
##
|
||||
ingress:
|
||||
## @param ingress.enabled Set to true to enable ingress record generation
|
||||
##
|
||||
enabled: true
|
||||
## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
|
||||
##
|
||||
apiVersion: ""
|
||||
## DEPRECATED: Use ingress.annotations instead of ingress.certManager
|
||||
## certManager: false
|
||||
##
|
||||
|
||||
## @param ingress.hostname When the ingress is enabled, a host pointing to this will be created
|
||||
##
|
||||
hostname: {{ .Values.globals.phpmyadmin.hostname }}
|
||||
## @param ingress.pathType Ingress path type
|
||||
##
|
||||
pathType: ImplementationSpecific
|
||||
## @param ingress.path Default path for the ingress record
|
||||
## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
|
||||
##
|
||||
path: /
|
||||
## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
|
||||
## e.g:
|
||||
## extraPaths:
|
||||
## - path: /*
|
||||
## backend:
|
||||
## serviceName: ssl-redirect
|
||||
## servicePort: use-annotation
|
||||
##
|
||||
extraPaths: []
|
||||
## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
|
||||
## For a full list of possible ingress annotations, please see
|
||||
## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
|
||||
## Use this parameter to set the required annotations for cert-manager, see
|
||||
## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
|
||||
##
|
||||
## e.g:
|
||||
## annotations:
|
||||
## kubernetes.io/ingress.class: nginx
|
||||
## cert-manager.io/cluster-issuer: cluster-issuer-name
|
||||
##
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: {{ .Values.globals.phpmyadmin.ingressClass }}
|
||||
cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
|
||||
## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter
|
||||
## TLS certificates will be retrieved from a TLS secret with name: \{\{- printf "%s-tls" .Values.ingress.hostname \}\}
|
||||
## You can use the ingress.secrets parameter to create this TLS secret, relay on cert-manager to create it, or
|
||||
## let the chart create self-signed certificates for you
|
||||
##
|
||||
tls: true
|
||||
## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
|
||||
## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
|
||||
## Example:
|
||||
## extraHosts:
|
||||
## - name: phpmyadmin.local
|
||||
## path: /
|
||||
##
|
||||
extraHosts: []
|
||||
## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
|
||||
## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
|
||||
## Example:
|
||||
## extraTls:
|
||||
## - hosts:
|
||||
## - phpmyadmin.local
|
||||
## secretName: phpmyadmin.local-tls
|
||||
##
|
||||
extraTls: []
|
||||
## @param ingress.secrets If you're providing your own certificates and want to manage the secret via helm,
|
||||
## please use this to add the certificates as secrets key and certificate should start with
|
||||
## -----BEGIN CERTIFICATE----- or REDACTED
|
||||
## name should line up with a secretName set further up
|
||||
##
|
||||
## If it is not set and you're using cert-manager, this is unneeded, as it will create the secret for you
|
||||
## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created
|
||||
## It is also possible to create and manage the certificates outside of this helm chart
|
||||
## Please see README.md for more information
|
||||
##
|
||||
## Example
|
||||
## secrets:
|
||||
## - name: phpmyadmin.local-tls
|
||||
## key: ""
|
||||
## certificate: ""
|
||||
##
|
||||
secrets: []
|
||||
## @param ingress.existingSecretName If you're providing your own certificate and want to manage the secret yourself,
|
||||
## please provide the name of the secret with this parameter. This secret will then be used for tls termination.
|
||||
## It has higher priority than the cert-manager or the generation of the certificate from the chart.
|
||||
##
|
||||
## Example:
|
||||
## existingSecretName: "byo-phpmyadmin-tls"
|
||||
##
|
||||
existingSecretName: ""
|
||||
## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+)
|
||||
## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster .
|
||||
## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
|
||||
##
|
||||
ingressClassName: ""
|
||||
## @param ingress.extraRules Additional rules to be covered with this ingress record
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
|
||||
## e.g:
|
||||
## extraRules:
|
||||
## - host: phpmyadmin.local
|
||||
## http:
|
||||
## path: /
|
||||
## backend:
|
||||
## service:
|
||||
## name: phpmyadmin-svc
|
||||
## port:
|
||||
## name: http
|
||||
##
|
||||
extraRules: []
|
||||
## @section Database parameters
|
||||
|
||||
## Database configuration
|
||||
##
|
||||
db:
|
||||
## @param db.allowArbitraryServer Enable connection to arbitrary MySQL server
|
||||
## If you do not want the user to be able to specify an arbitrary MySQL server at login time, set this to false
|
||||
##
|
||||
allowArbitraryServer: true
|
||||
## @param db.port Database port to use to connect
|
||||
##
|
||||
port: 3306
|
||||
## @param db.chartName Database suffix if included in the same release
|
||||
## If you are deploying phpMyAdmin as part of a release and the database is part
|
||||
## of the release, you can pass a suffix that will be used to find the database
|
||||
## in releasename-dbSuffix. Please note that this setting precedes db.host
|
||||
## e.g:
|
||||
## chartName: mariadb
|
||||
##
|
||||
chartName: ""
|
||||
## @param db.host Database Hostname. Ignored when `db.chartName` is set.
|
||||
## e.g:
|
||||
## host: foo
|
||||
##
|
||||
host: "mysql.{{ .Values.globals.mysql.namespace }}.svc.cluster.local"
|
||||
## @param db.bundleTestDB Deploy a MariaDB instance for testing purposes
|
||||
##
|
||||
bundleTestDB: false
|
||||
## @param db.enableSsl Enable SSL for the connection between phpMyAdmin and the database
|
||||
##
|
||||
enableSsl: false
|
||||
ssl:
|
||||
## @param db.ssl.clientKey Client key file when using SSL
|
||||
##
|
||||
clientKey: ""
|
||||
## @param db.ssl.clientCertificate Client certificate file when using SSL
|
||||
##
|
||||
clientCertificate: ""
|
||||
## @param db.ssl.caCertificate CA file when using SSL
|
||||
##
|
||||
caCertificate: ""
|
||||
## @param db.ssl.ciphers List of allowable ciphers for connections when using SSL
|
||||
##
|
||||
ciphers: []
|
||||
## @param db.ssl.verify Enable SSL certificate validation
|
||||
##
|
||||
verify: true
|
||||
## @param mariadb MariaDB chart configuration
|
||||
## https://github.com/bitnami/charts/blob/main/bitnami/mariadb/values.yaml
|
||||
##
|
||||
mariadb: {}
|
||||
## @section Other Parameters
|
||||
|
||||
## Service account for PhpMyAdmin to use.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
|
||||
##
|
||||
serviceAccount:
|
||||
## @param serviceAccount.create Enable creation of ServiceAccount for PhpMyAdmin pod
|
||||
##
|
||||
create: true
|
||||
## @param serviceAccount.name The name of the ServiceAccount to use.
|
||||
## If not set and create is true, a name is generated using the common.names.fullname template
|
||||
##
|
||||
name: ""
|
||||
## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
|
||||
## Can be set to false if pods using this serviceAccount do not need to use K8s API
|
||||
##
|
||||
automountServiceAccountToken: false
|
||||
## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
|
||||
##
|
||||
annotations: {}
|
||||
## @section Metrics parameters
|
||||
|
||||
## Prometheus Exporter / Metrics
|
||||
##
|
||||
metrics:
|
||||
## @param metrics.enabled Start a side-car prometheus exporter
|
||||
##
|
||||
enabled: true
|
||||
## @param metrics.image.registry [default: REGISTRY_NAME] Apache exporter image registry
|
||||
## @param metrics.image.repository [default: REPOSITORY_NAME/apache-exporter] Apache exporter image repository
|
||||
## @skip metrics.image.tag Apache exporter image tag (immutable tags are recommended)
|
||||
## @param metrics.image.digest Apache exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
|
||||
## @param metrics.image.pullPolicy Image pull policy
|
||||
## @param metrics.image.pullSecrets Specify docker-registry secret names as an array
|
||||
##
|
||||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/apache-exporter
|
||||
tag: 1.0.9-debian-12-r8
|
||||
digest: ""
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
## Secrets must be manually created in the namespace.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
## Example:
|
||||
## pullSecrets:
|
||||
## - myRegistryKeySecretName
|
||||
##
|
||||
pullSecrets: []
|
||||
## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production).
|
||||
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
|
||||
##
|
||||
resourcesPreset: "nano"
|
||||
## @param metrics.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
|
||||
## Example:
|
||||
## resources:
|
||||
## requests:
|
||||
## cpu: 2
|
||||
## memory: 512Mi
|
||||
## limits:
|
||||
## cpu: 3
|
||||
## memory: 1024Mi
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
||||
##
|
||||
resources: {}
|
||||
## Prometheus Exporter service configuration
|
||||
##
|
||||
service:
|
||||
## @param metrics.service.type Prometheus metrics service type
|
||||
##
|
||||
type: ClusterIP
|
||||
## @param metrics.service.port Prometheus metrics service port
|
||||
##
|
||||
port: 9117
|
||||
## @param metrics.service.annotations [object] Annotations for Prometheus metrics service
|
||||
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "{{ `{{ .Values.metrics.service.port }}` }}"
|
||||
## @param metrics.service.clusterIP phpmyadmin service Cluster IP
|
||||
## e.g.:
|
||||
## clusterIP: None
|
||||
##
|
||||
clusterIP: ""
|
||||
## @param metrics.service.loadBalancerIP Load Balancer IP if the Prometheus metrics server type is `LoadBalancer`
|
||||
## Use serviceLoadBalancerIP to request a specific static IP, otherwise leave blank
|
||||
##
|
||||
loadBalancerIP: ""
|
||||
## @param metrics.service.loadBalancerSourceRanges phpmyadmin service Load Balancer sources
|
||||
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
|
||||
## e.g:
|
||||
## loadBalancerSourceRanges:
|
||||
## - 10.10.10.0/24
|
||||
##
|
||||
loadBalancerSourceRanges: []
|
||||
## @param metrics.service.externalTrafficPolicy phpmyadmin service external traffic policy
|
||||
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
|
||||
##
|
||||
externalTrafficPolicy: Cluster
|
||||
## @param metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
|
||||
## If "ClientIP", consecutive client requests will be directed to the same Pod
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
|
||||
##
|
||||
sessionAffinity: None
|
||||
## @param metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity
|
||||
## sessionAffinityConfig:
|
||||
## clientIP:
|
||||
## timeoutSeconds: 300
|
||||
##
|
||||
sessionAffinityConfig: {}
|
||||
## Prometheus Service Monitor
|
||||
## ref: https://github.com/coreos/prometheus-operator
|
||||
## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
|
||||
##
|
||||
serviceMonitor:
|
||||
## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator
|
||||
##
|
||||
enabled: false
|
||||
## @param metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created
|
||||
##
|
||||
namespace: ""
|
||||
## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
|
||||
##
|
||||
jobLabel: ""
|
||||
## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped
|
||||
##
|
||||
interval: 30s
|
||||
## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended
|
||||
## e.g:
|
||||
## scrapeTimeout: 30s
|
||||
##
|
||||
scrapeTimeout: ""
|
||||
## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
|
||||
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
|
||||
##
|
||||
relabelings: []
|
||||
## @param metrics.serviceMonitor.metricRelabelings Specify Metric Relabelings to add to the scrape endpoint
|
||||
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
|
||||
##
|
||||
metricRelabelings: []
|
||||
## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor
|
||||
##
|
||||
labels: {}
|
||||
## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
|
||||
##
|
||||
honorLabels: false
|
||||
## @param metrics.serviceMonitor.selector ServiceMonitor selector labels
|
||||
## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
|
||||
##
|
||||
## selector:
|
||||
## prometheus: my-prometheus
|
||||
##
|
||||
selector: {}
|
||||
## @section NetworkPolicy parameters
|
||||
|
||||
## Network Policy configuration
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
|
||||
##
|
||||
networkPolicy:
|
||||
## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
|
||||
##
|
||||
enabled: true
|
||||
## @param networkPolicy.allowExternal Don't require server label for connections
|
||||
## The Policy model to apply. When set to false, only pods with the correct
|
||||
## server label will have network access to the ports server is listening
|
||||
## on. When true, server will accept connections from any source
|
||||
## (with the correct destination port).
|
||||
##
|
||||
allowExternal: true
|
||||
## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
|
||||
##
|
||||
allowExternalEgress: true
|
||||
## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
|
||||
## e.g:
|
||||
## extraIngress:
|
||||
## - ports:
|
||||
## - port: 1234
|
||||
## from:
|
||||
## - podSelector:
|
||||
## - matchLabels:
|
||||
## - role: frontend
|
||||
## - podSelector:
|
||||
## - matchExpressions:
|
||||
## - key: role
|
||||
## operator: In
|
||||
## values:
|
||||
## - frontend
|
||||
extraIngress: []
|
||||
## @param networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy
|
||||
## e.g:
|
||||
## extraEgress:
|
||||
## - ports:
|
||||
## - port: 1234
|
||||
## to:
|
||||
## - podSelector:
|
||||
## - matchLabels:
|
||||
## - role: frontend
|
||||
## - podSelector:
|
||||
## - matchExpressions:
|
||||
## - key: role
|
||||
## operator: In
|
||||
## values:
|
||||
## - frontend
|
||||
##
|
||||
extraEgress: []
|
||||
## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
|
||||
## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
|
||||
##
|
||||
ingressNSMatchLabels: {}
|
||||
ingressNSPodMatchLabels: {}
|
||||
|
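Since these values are templated into the phpMyAdmin release by helmfile, a targeted diff/apply is a quick way to check the rendered output before touching the cluster. A minimal sketch, assuming the release is named `phpmyadmin` and is declared in `01-databases.yaml` (both names are guesses from the repo layout):

```sh
cd proxmox/k8s

# Render the gotmpl values and compare against the live release first
helmfile -f helmfile.d/01-databases.yaml -l name=phpmyadmin diff

# Then roll it out
helmfile -f helmfile.d/01-databases.yaml -l name=phpmyadmin apply
```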
1936
proxmox/k8s/helmfile.d/values/postgres/values.yaml.gotmpl
Normal file
File diff suppressed because it is too large
Load Diff

132
proxmox/k8s/helmfile.d/values/rancher/values.yaml.gotmpl
Normal file
@ -0,0 +1,132 @@
# Additional Trusted CAs.
# Enable this flag and add your CA certs as a secret named tls-ca-additional in the namespace.
# See README.md for details.
additionalTrustedCAs: false

antiAffinity: preferred
topologyKey: kubernetes.io/hostname

# Audit Logs
# Source: https://ranchermanager.docs.rancher.com/how-to-guides/advanced-user-guides/enable-api-audit-log
# The audit log is piped to the console of the rancher-audit-log container in the rancher pod.
# level: Verbosity of logs, 0 to 3. 0 is off, 3 is most verbose.
# Docs: https://ranchermanager.docs.rancher.com/how-to-guides/advanced-user-guides/enable-api-audit-log#audit-log-levels
auditLog:
  destination: sidecar
  hostPath: /var/log/rancher/audit/
  level: 0
  maxAge: 1
  maxBackup: 1
  maxSize: 100

  # Image for collecting rancher audit logs.
  # Important: update pkg/image/export/resolve.go when this default image is changed, so that it's reflected accordingly in rancher-images.txt generated for air-gapped setups.
  image:
    repository: "rancher/mirrored-bci-micro"
    tag: 15.6.24.2
    # Override imagePullPolicy image
    # options: Always, Never, IfNotPresent
    pullPolicy: "IfNotPresent"

# As of Rancher v2.5.0 this flag is deprecated and must be set to 'true' in order for Rancher to start
addLocal: "true"

# Add debug flag to Rancher server
debug: false

# When starting Rancher for the first time, bootstrap the admin as restricted-admin
restrictedAdmin: false

# Control how the Rancher agents validate TLS connections
# Valid options: strict, or system-store
# Note: for new installations, empty will default to strict on 2.9+, or system-store on 2.8 or older
agentTLSMode: ""

# Extra environment variables passed to the rancher pods.
# extraEnv:
# - name: CATTLE_TLS_MIN_VERSION
#   value: "1.0"

# Fully qualified name to reach your Rancher server
hostname: {{ .Values.globals.rancher.hostname }}

### ingress ###
# Readme for details and instructions on adding tls secrets.
ingress:
  # If set to false, ingress will not be created
  # Defaults to true
  # options: true, false
  enabled: true
  includeDefaultExtraAnnotations: true
  extraAnnotations:
    kubernetes.io/ingress.class: {{ .Values.globals.rancher.ingressClass }}
    cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
  ingressClassName: {{ .Values.globals.rancher.ingressClass }}
  # backend port number
  servicePort: 80

  tls:
    # options: rancher, letsEncrypt, secret
    source: secret
    secretName: rancher-tls

### service ###
# Override to use NodePort or LoadBalancer service type - default is ClusterIP
service:
  type: ""
  annotations: {}

### LetsEncrypt config ###
# ProTip: The production environment only allows you to register a name 5 times a week.
# Use staging until you have your config right.
letsEncrypt:
  # email: none@example.com
  environment: {{ .Values.globals.certs.certIssuerMode }}
  ingress:
    # options: traefik, nginx
    class: {{ .Values.globals.rancher.ingressClass }}
# If you are using certs signed by a private CA, set to 'true' and set the 'tls-ca'
# secret in the 'rancher-system' namespace. See the README.md for details
privateCA: false

# http[s] proxy server passed into rancher server.
# proxy: http://<username>@<password>:<url>:<port>

# comma separated list of domains or ip addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local

# Override rancher image location for Air Gap installs
rancherImage: rancher/rancher
# rancher/rancher image tag. https://hub.docker.com/r/rancher/rancher/tags/
# Defaults to .Chart.appVersion
# rancherImageTag: v2.0.7

# Override imagePullPolicy for rancher server images
# options: Always, Never, IfNotPresent
# Defaults to IfNotPresent
# rancherImagePullPolicy: <pullPolicy>

# Number of Rancher server replicas. Setting to a negative number will scale dynamically
# between 0 and abs(replicas), based on the number of available nodes in the cluster.
replicas: {{ .Values.globals.rancher.replicas }}

# Set priorityClassName to avoid eviction
priorityClassName: rancher-critical

# Set pod resource requests/limits for Rancher.
resources: {}

#
# tls
#   Where to offload the TLS/SSL encryption
# - ingress (default)
# - external
tls: ingress

systemDefaultRegistry: ""

# Set to use the packaged system charts
useBundledSystemChart: false

# Set a bootstrap password. If left empty, a random password will be generated.
bootstrapPassword: {{ .Values.globals.rancher.bootstrapPassword }}
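Once Rancher is up, the explicitly set bootstrap password can be cross-checked against the secret Rancher stores it in. A sketch, assuming the chart landed in the conventional `cattle-system` namespace:

```sh
# Wait for the rollout, then read back the bootstrap password
kubectl -n cattle-system rollout status deploy/rancher
kubectl -n cattle-system get secret bootstrap-secret \
  -o go-template='{{.data.bootstrapPassword|base64decode}}{{"\n"}}'
```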
2236
proxmox/k8s/helmfile.d/values/redis/values.yaml.gotmpl
Normal file
File diff suppressed because it is too large
Load Diff

61
proxmox/k8s/helmfile.d/values/traefik/values.yaml.gotmpl
Normal file
@ -0,0 +1,61 @@
globalArguments:
  - "--global.sendanonymoususage=false"
  - "--global.checknewversion=false"

additionalArguments:
  - "--serversTransport.insecureSkipVerify=true"
  - "--log.level=INFO"

deployment:
  enabled: true
  replicas: 3
  annotations: {}
  podAnnotations: {}
  additionalContainers: []
  initContainers: []

ports:
  web:
    redirections:
      entrypoint:
        to: websecure
        scheme: https
        permanent: true
  websecure:
    http3:
      enabled: true
      advertisedPort: 4443
    tls:
      enabled: true

ingressRoute:
  dashboard:
    enabled: false

ingressClass:
  name: {{ .Values.globals.traefik.ingressClass }}
providers:
  kubernetesCRD:
    enabled: true
    ingressClass: {{ .Values.globals.traefik.ingressClass }}
    allowExternalNameServices: true
  kubernetesIngress:
    enabled: true
    ingressClass: {{ .Values.globals.traefik.ingressClass }}
    allowExternalNameServices: true
    publishedService:
      enabled: false

rbac:
  enabled: true

service:
  enabled: true
  type: LoadBalancer
  annotations: {}
  labels: {}
  spec:
    loadBalancerIP: {{ .Values.globals.traefik.loadBalancerIP }}
  loadBalancerSourceRanges: []
  externalIPs: []
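With `service.type: LoadBalancer` and a pinned `loadBalancerIP`, it is worth confirming the address was actually assigned and that the `web` entrypoint redirects to HTTPS. A sketch; the `traefik` namespace and the probe hostname are assumptions:

```sh
# EXTERNAL-IP should match the pinned loadBalancerIP
kubectl -n traefik get svc traefik -o wide

# Expect a 3xx permanent redirect to https://
curl -sI http://<any-ingress-host>/ | head -n 3
```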
234
proxmox/k8s/helmfile.d/values/uptime-kuma/values.yaml.gotmpl
Normal file
@ -0,0 +1,234 @@
# Default values for uptime-kuma.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: louislam/uptime-kuma
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: "1.23.13-debian"

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
# -- A custom namespace to override the default namespace for the deployed resources.
namespaceOverride: ""

# If this option is set to false a StatefulSet instead of a Deployment is used
useDeploy: true

serviceAccount:
  # Specifies whether a service account should be created
  create: false
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podAnnotations: {}
podLabels:
  {}
  # app: uptime-kuma
podEnv: []
# optional additional environment variables
# - name: "A_VARIABLE"
#   value: "a-value"

podSecurityContext:
  {}
  # fsGroup: 2000

securityContext:
  {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: ClusterIP
  port: 3001
  nodePort:
  annotations: {}

ingress:
  enabled: true
  className: {{ .Values.globals.uptimeKuma.ingressClass }}
  extraLabels:
    {}
    # vhost: uptime-kuma.company.corp
  annotations:
    cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
    kubernetes.io/ingress.class: {{ .Values.globals.uptimeKuma.ingressClass }}
    # nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
    # nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
    # nginx.ingress.kubernetes.io/server-snippets: |
    #   location / {
    #     proxy_set_header Upgrade $http_upgrade;
    #     proxy_http_version 1.1;
    #     proxy_set_header X-Forwarded-Host $http_host;
    #     proxy_set_header X-Forwarded-Proto $scheme;
    #     proxy_set_header X-Forwarded-For $remote_addr;
    #     proxy_set_header Host $host;
    #     proxy_set_header Connection "upgrade";
    #     proxy_set_header X-Real-IP $remote_addr;
    #     proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    #     proxy_set_header Upgrade $http_upgrade;
    #     proxy_cache_bypass $http_upgrade;
    #   }
  hosts:
    {{- range .Values.globals.uptimeKuma.hosts }}
    - host: {{ . }}
      paths:
        - path: /
          pathType: ImplementationSpecific
    {{- end }}

  tls:
    []
    # - secretName: chart-example-tls
    #   hosts:
    #     - chart-example.local

resources:
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  limits:
    cpu: 200m
    memory: 256Mi
  requests:
    cpu: 100m
    memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

livenessProbe:
  enabled: true
  failureThreshold: 3
  # Uptime-Kuma recommends a delay of 180 seconds so the server has fully started.
  # https://github.com/louislam/uptime-kuma/blob/ae224f9e188b1fc32ed8729818710975589cdce7/extra/healthcheck.go#L3
  initialDelaySeconds: 180
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 2
  # The NodeJS version of this healthcheck is no longer supported, therefore we don't specify a node command.
  # https://github.com/louislam/uptime-kuma/blob/ae224f9e188b1fc32ed8729818710975589cdce7/extra/healthcheck.js#L6
  exec:
    command:
      - "extra/healthcheck"

readinessProbe:
  enabled: true
  initialDelaySeconds: 10
  periodSeconds: 10
  timeoutSeconds: 1
  failureThreshold: 3
  successThreshold: 1
  exec:
    command: []
  httpGet:
    path: /
    port: 3001
    scheme: HTTP
    httpHeaders: []

volume:
  enabled: true
  accessMode: ReadWriteMany
  size: 4Gi
  # If you want to use a storage class other than the default, uncomment this
  # line and define the storage class name
  storageClassName: {{ .Values.globals.uptimeKuma.storageClass }}
  # Reuse your own pre-existing PVC.
  existingClaim: ""

# -- A list of additional volumes to be added to the pod
additionalVolumes:
  []
  # - name: "additional-certificates"
  #   configMap:
  #     name: "additional-certificates"
  #     optional: true
  #     defaultMode: 420

# -- A list of additional volumeMounts to be added to the pod
additionalVolumeMounts:
  []
  # - name: "additional-certificates"
  #   mountPath: "/etc/ssl/certs/additional/additional-ca.pem"
  #   readOnly: true
  #   subPath: "additional-ca.pem"

strategy:
  type: Recreate

# Prometheus ServiceMonitor configuration
serviceMonitor:
  enabled: false
  # -- Scrape interval. If not set, the Prometheus default scrape interval is used.
  interval: 60s
  # -- Timeout if metrics can't be retrieved in given time interval
  scrapeTimeout: 10s
  # -- Scheme to use when scraping, e.g. http (default) or https.
  scheme: ~
  # -- TLS configuration to use when scraping, only applicable for scheme https.
  tlsConfig: {}
  # -- Prometheus [RelabelConfigs] to apply to samples before scraping
  relabelings: []
  # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
  metricRelabelings: []
  # -- Prometheus ServiceMonitor selector, only select Prometheus's with these
  # labels (if not set, select any Prometheus)
  selector: {}

  # -- Namespace where the ServiceMonitor resource should be created, default is
  # the same as the release namespace
  namespace: ~
  # -- Additional labels to add to the ServiceMonitor
  additionalLabels: {}
  # -- Additional annotations to add to the ServiceMonitor
  annotations: {}

  # -- BasicAuth credentials for scraping metrics, use API token and any string for username
  # basicAuth:
  #   username: "metrics"
  #   password: ""

# -- Use this option to set a custom DNS policy to the created deployment
dnsPolicy: ""

# -- Use this option to set custom DNS configurations to the created deployment
dnsConfig: {}

# -- Use this option to set custom PriorityClass to the created deployment
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
priorityClassName: ""

# -- Create a NetworkPolicy
networkPolicy:
  # -- Enable/disable Network Policy
  enabled: false
  # -- Enable/disable Ingress policy type
  ingress: true
  # -- Enable/disable Egress policy type
  egress: true
  # -- Allow incoming connections only from specific Pods
  # When set to true, the geoserver will accept connections from any source.
  # When false, only Pods with the label \{\{ include "geoserver.fullname" . \}\}-client=true will have network access
  allowExternal: true
  # -- Selects particular namespaces for which all Pods are allowed as ingress sources
  namespaceSelector: {}
  # matchLabels:
  #   role: frontend
  # matchExpressions:
  #   - {key: role, operator: In, values: [frontend]}
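The liveness probe execs the Go healthcheck binary shipped in the image, so the same check can be run by hand when debugging probe failures. A sketch; the namespace and deployment name are assumptions:

```sh
kubectl -n uptime-kuma exec deploy/uptime-kuma -- extra/healthcheck
echo $?   # 0 when the server answers on port 3001
```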
22
proxmox/k8s/manifests/dns/configmap.yml
Normal file
@ -0,0 +1,22 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          pods insecure
          fallthrough in-addr.arpa ip6.arpa
        }
        forward . 10.0.123.123
        cache 30
        loop
        reload
        loadbalance
    }
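This override points CoreDNS's upstream at the homelab DNS server on 10.0.123.123. After applying it, a restart plus a test lookup from inside the cluster confirms the forward path; the lookup target is a placeholder:

```sh
kubectl apply -f proxmox/k8s/manifests/dns/configmap.yml
kubectl -n kube-system rollout restart deploy/coredns

# Resolve an internal name from a throwaway pod
kubectl run dnstest --rm -it --restart=Never --image=busybox:1.36 \
  -- nslookup <some-internal-hostname>
```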
4
proxmox/tf/.gitignore
vendored
Normal file
@ -0,0 +1,4 @@
credentials.auto.tfvars
terraform.tfstate*
.terraform/
.terraform.lock.hcl
20
proxmox/tf/backend.tf
Normal file
@ -0,0 +1,20 @@
terraform {
  backend "s3" {
    bucket = "tfstate"

    endpoints = {
      s3 = var.s3_backend_endpoint
    }

    key        = "homelab.tfstate"
    access_key = var.s3_access_key
    secret_key = var.s3_secret_key

    region                      = "main" # Region validation will be skipped
    skip_credentials_validation = true   # Skip AWS related checks and validations
    skip_requesting_account_id  = true
    skip_metadata_api_check     = true
    skip_region_validation      = true
    use_path_style              = true # Enable path-style S3 URLs (https://<HOST>/<BUCKET>), see https://developer.hashicorp.com/terraform/language/settings/backends/s3#use_path_style
  }
}
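One subtlety: plain Terraform rejects `var.` references inside a `backend` block, while OpenTofu (1.8+) resolves them statically at init time, which is what this file relies on. Initialization then looks roughly like this (endpoint and keys are illustrative):

```sh
cd proxmox/tf

# OpenTofu evaluates the backend variables during init;
# they can also come from a *.auto.tfvars file.
tofu init \
  -var 's3_backend_endpoint=https://s3.example.internal' \
  -var 's3_access_key=AKIA...' \
  -var 's3_secret_key=...'
```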
11
proxmox/tf/cloud-image.tf
Normal file
@ -0,0 +1,11 @@
resource "proxmox_virtual_environment_file" "ubuntu_cloud_image" {
  content_type = "iso"
  datastore_id = var.proxmox_image_storage
  node_name    = "pve"

  source_file {
    path = "https://cloud-images.ubuntu.com/noble/20250122/noble-server-cloudimg-amd64.img"

    checksum = "482244b83f49a97ee61fb9b8520d6e8b9c2e3c28648de461ba7e17681ddbd1c9"
  }
}
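The image URL and checksum are pinned to a specific 20250122 serial, so upgrades are explicit. Before bumping the pin, the new checksum can be pulled from Ubuntu's published manifest:

```sh
curl -sL https://cloud-images.ubuntu.com/noble/20250122/SHA256SUMS \
  | grep noble-server-cloudimg-amd64.img
```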
40
proxmox/tf/cloud-init.tf
Normal file
@ -0,0 +1,40 @@
data "local_file" "ssh_pub_key" {
  filename = "${path.module}/data/id_rsa.pub"
}

locals {
  common_cloud_init = <<EOF
#cloud-config
chpasswd:
  list: |
    ubuntu:ubuntu
    ${var.username}:${var.username}
  expire: false
packages:
  - qemu-guest-agent
  - nfs-common
  - avahi-daemon
timezone: America/Vancouver

users:
  - default
  - name: ubuntu
    groups: sudo
    shell: /bin/bash
    ssh-authorized-keys:
      - ${trimspace(data.local_file.ssh_pub_key.content)}
    sudo: ALL=(ALL) NOPASSWD:ALL
  - name: ${var.username}
    groups: sudo
    shell: /bin/bash
    ssh_import_id:
      - ${var.ssh_import_id}
    sudo: ALL=(ALL) NOPASSWD:ALL

power_state:
  delay: now
  mode: reboot
  message: Rebooting after cloud-init completion
  condition: true
EOF
}
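Proxmox stores the rendered snippet on the node once applied, and cloud-init can lint it there. A sketch; the path assumes a default `local` snippets datastore:

```sh
ssh root@pve \
  'cloud-init schema --config-file /var/lib/vz/snippets/dns-server.cloud-config.yaml'
```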
1
proxmox/tf/data/id_rsa.pub
Symbolic link
@ -0,0 +1 @@
../../../data/ssh/id_rsa.pub
14
proxmox/tf/dns-server.tf
Normal file
@ -0,0 +1,14 @@
module "dns_server" {
  source = "${path.module}/modules/dns-server"

  vm_id = "200"

  ipv4_address = "10.0.123.123/16"
  pool_id      = proxmox_virtual_environment_pool.core.id

  proxmox_vm_storage    = var.proxmox_vm_storage
  proxmox_image_storage = var.proxmox_image_storage

  common_cloud_init = local.common_cloud_init
  cloud_image_id    = proxmox_virtual_environment_file.ubuntu_cloud_image.id
}
17
proxmox/tf/docker-swarm.tf
Normal file
@ -0,0 +1,17 @@
module "docker_swarm_stingray" {
  source = "${path.module}/modules/docker-swarm"

  swarm_name    = "stingray"
  vm_id_prefix  = "8"
  subnet_cidr   = "10.0.42.0/24"
  gateway       = "10.0.0.1"
  manager_count = 1
  worker_count  = 2
  storage_size  = 32

  proxmox_vm_storage    = var.proxmox_vm_storage
  proxmox_image_storage = var.proxmox_image_storage

  common_cloud_init = local.common_cloud_init
  cloud_image_id    = proxmox_virtual_environment_file.ubuntu_cloud_image.id
}
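Because each swarm is wrapped in a module, it can be planned and applied in isolation from the rest of the infrastructure:

```sh
cd proxmox/tf
tofu plan  -target=module.docker_swarm_stingray
tofu apply -target=module.docker_swarm_stingray
```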
39
proxmox/tf/k8s.tf
Normal file
@ -0,0 +1,39 @@
module "k8s_dolo" {
  source  = "${path.module}/modules/k8s"
  started = true

  cluster_name         = "dolo"
  vm_id_prefix         = "1"
  subnet_cidr          = "10.0.185.0/24"
  gateway              = "10.0.0.1"
  control_plane_count  = 3
  worker_count         = 3
  storage_worker_count = 3
  storage_size         = 32

  proxmox_vm_storage    = var.proxmox_vm_storage
  proxmox_image_storage = var.proxmox_image_storage

  common_cloud_init = local.common_cloud_init
  cloud_image_id    = proxmox_virtual_environment_file.ubuntu_cloud_image.id
}

module "k8s_folly" {
  source = "${path.module}/modules/k8s"

  started = false

  cluster_name         = "folly"
  vm_id_prefix         = "2"
  subnet_cidr          = "10.0.186.0/24"
  control_plane_count  = 0
  worker_count         = 0
  storage_worker_count = 0

  proxmox_vm_storage    = var.proxmox_vm_storage
  proxmox_image_storage = var.proxmox_image_storage

  common_cloud_init = local.common_cloud_init
  cloud_image_id    = proxmox_virtual_environment_file.ubuntu_cloud_image.id
}
91
proxmox/tf/modules/dns-server/main.tf
Normal file
@ -0,0 +1,91 @@
locals {
  dns_server = {
    name = "dns-server"
  }
}

resource "proxmox_virtual_environment_vm" "dns_server" {
  name        = local.dns_server.name
  description = "Managed by Terraform"
  tags        = ["terraform", "ubuntu", "dns-server"]

  node_name = "pve"
  vm_id     = var.vm_id
  pool_id   = var.pool_id

  cpu {
    cores = 1
    type  = "host"
  }

  memory {
    dedicated = 512
    floating  = 512
  }

  agent {
    # read 'Qemu guest agent' section, change to true only when ready
    enabled = true
  }

  # This should be one of the first nodes to start up to provide DNS globally
  startup {
    order      = "0"
    up_delay   = "60"
    down_delay = "60"
  }

  disk {
    datastore_id = var.proxmox_vm_storage
    file_id      = var.cloud_image_id
    interface    = "virtio0"
    iothread     = true
    discard      = "on"
    size         = 16
    file_format  = "qcow2"
  }

  initialization {
    ip_config {
      ipv4 {
        address = var.ipv4_address
        gateway = var.gateway
      }
    }
    datastore_id = var.proxmox_image_storage

    user_data_file_id = proxmox_virtual_environment_file.dns_server.id
  }

  network_device {
    bridge = "vmbr0"
  }

  operating_system {
    type = "l26"
  }

  lifecycle {
  }
}

resource "proxmox_virtual_environment_file" "dns_server" {
  content_type = "snippets"
  datastore_id = var.proxmox_image_storage
  node_name    = "pve"

  source_raw {
    data = <<EOF
${var.common_cloud_init}

hostname: ${local.dns_server.name}
EOF
    file_name = "${local.dns_server.name}.cloud-config.yaml"
  }
}

resource "ansible_host" "dns_server" {
  # Use mDNS rather than IP
  name   = "${local.dns_server.name}.local"
  groups = ["core", "dns_server"]
}
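The `ansible_host` resource registers the VM in the Terraform-backed dynamic inventory under its mDNS name, so reachability can be checked straight from the inventory. A sketch; the inventory path is a guess from the repo layout:

```sh
cd proxmox/ansible
ansible -i inventory/full dns_server -m ping
```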
13
proxmox/tf/modules/dns-server/providers.tf
Normal file
@ -0,0 +1,13 @@
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.70.0"
    }
    ansible = {
      source  = "ansible/ansible"
      version = "1.3.0"
    }
  }
}
34
proxmox/tf/modules/dns-server/variables.tf
Normal file
@ -0,0 +1,34 @@
variable "common_cloud_init" {
  type        = string
  description = "Base cloud-init template"
}

variable "cloud_image_id" {
  type        = string
  description = "Cloud image to use"
}

variable "proxmox_image_storage" {
  type = string
}

variable "proxmox_vm_storage" {
  type = string
}

variable "gateway" {
  type    = string
  default = "10.0.0.1"
}

variable "ipv4_address" {
  type = string
}

variable "pool_id" {
  type = string
}

variable "vm_id" {
  type = string
}
283
proxmox/tf/modules/docker-swarm/main.tf
Normal file
@ -0,0 +1,283 @@
locals {
  managers = [
    for i in range(var.manager_count) : {
      name = "${var.swarm_name}-manager-${format("%02s", i + 1)}"
    }
  ]
  manager_storage_dummies = [
    for i in range(var.manager_count) : {
      name = "${var.swarm_name}-manager-${format("%02s", i + 1)}-dummy"
    }
  ]
  workers = [
    for i in range(var.worker_count) : {
      name = "${var.swarm_name}-worker-${format("%02s", i + 1)}"
    }
  ]
  worker_storage_dummies = [
    for i in range(var.worker_count) : {
      name = "${var.swarm_name}-worker-${format("%02s", i + 1)}-dummy"
    }
  ]
}

resource "proxmox_virtual_environment_pool" "swarm_pool" {
  comment = "Managed by Terraform"
  pool_id = var.swarm_name
}

resource "proxmox_virtual_environment_vm" "swarm_manager_dummy" {
  count       = var.manager_count
  name        = local.manager_storage_dummies[count.index].name
  description = "Managed by Terraform"
  tags        = ["terraform", "disk-dummy", var.swarm_name]

  node_name = "pve"
  vm_id     = "${var.vm_id_prefix}${count.index + 201}"
  pool_id   = proxmox_virtual_environment_pool.swarm_pool.id

  started = false
  on_boot = false

  disk {
    datastore_id = var.proxmox_vm_storage
    file_format  = "qcow2"
    interface    = "scsi0"
    size         = var.storage_size
  }
}

resource "proxmox_virtual_environment_vm" "swarm_manager" {
  count       = var.manager_count
  name        = local.managers[count.index].name
  description = "Managed by Terraform"
  tags        = ["terraform", "ubuntu", "swarm-manager", var.swarm_name]

  node_name = "pve"
  vm_id     = "${var.vm_id_prefix}${count.index + 101}"
  pool_id   = proxmox_virtual_environment_pool.swarm_pool.id

  cpu {
    cores = 2
    type  = "host"
  }

  memory {
    dedicated = 2048
    floating  = 2048
  }

  agent {
    # read 'Qemu guest agent' section, change to true only when ready
    enabled = true
  }

  startup {
    order      = "1"
    up_delay   = "60"
    down_delay = "60"
  }

  disk {
    datastore_id = var.proxmox_vm_storage
    file_id      = var.cloud_image_id
    interface    = "virtio0"
    iothread     = true
    discard      = "on"
    size         = 32
    file_format  = "qcow2"
  }

  dynamic "disk" {
    for_each = { for idx, val in proxmox_virtual_environment_vm.swarm_manager_dummy[count.index].disk : idx => val }
    iterator = data_disk
    content {
      datastore_id      = data_disk.value["datastore_id"]
      path_in_datastore = data_disk.value["path_in_datastore"]
      file_format       = data_disk.value["file_format"]
      size              = data_disk.value["size"]
      # assign from scsi1 and up
      interface = "scsi${data_disk.key + 1}"
    }
  }

  initialization {
    ip_config {
      ipv4 {
        # x.x.x.32 - x.x.x.39
        address = "${cidrhost(var.subnet_cidr, count.index + 32)}/24"
        gateway = var.gateway
      }
    }
    datastore_id = var.proxmox_image_storage

    user_data_file_id = proxmox_virtual_environment_file.swarm_manager[count.index].id
  }

  network_device {
    bridge = "vmbr0"
  }

  operating_system {
    type = "l26"
  }

  lifecycle {
  }
}

resource "proxmox_virtual_environment_file" "swarm_manager" {
  count        = var.manager_count
  content_type = "snippets"
  datastore_id = var.proxmox_image_storage
  node_name    = "pve"

  source_raw {
    data = <<EOF
${var.common_cloud_init}

hostname: ${local.managers[count.index].name}
EOF
    file_name = "${local.managers[count.index].name}.cloud-config.yaml"
  }
}

resource "proxmox_virtual_environment_file" "swarm_worker" {
  count        = var.worker_count
  content_type = "snippets"
  datastore_id = var.proxmox_image_storage
  node_name    = "pve"

  source_raw {
    data = <<EOF
${var.common_cloud_init}

hostname: ${local.workers[count.index].name}
EOF
    file_name = "${local.workers[count.index].name}.cloud-config.yaml"
  }
}

# This is currently how we create "disks" that are independent of a VM: by
# creating a dummy VM with a disk and then attaching the disk. This way, we
# can destroy the real VM without destroying the disk.
# https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_vm#example-attached-disks
resource "proxmox_virtual_environment_vm" "swarm_worker_dummy" {
  count       = var.worker_count
  name        = local.worker_storage_dummies[count.index].name
  description = "Managed by Terraform"
  tags        = ["terraform", "disk-dummy", var.swarm_name]

  node_name = "pve"
  vm_id     = "${var.vm_id_prefix}${count.index + 401}"
  pool_id   = proxmox_virtual_environment_pool.swarm_pool.id

  started = false
  on_boot = false

  disk {
    datastore_id = var.proxmox_vm_storage
    file_format  = "qcow2"
    interface    = "scsi0"
    size         = var.storage_size
  }
}

resource "proxmox_virtual_environment_vm" "swarm_worker" {
  count       = var.worker_count
  name        = local.workers[count.index].name
  description = "Managed by Terraform"
  tags        = ["terraform", "ubuntu", "swarm-worker", var.swarm_name]

  node_name = "pve"
  vm_id     = "${var.vm_id_prefix}${count.index + 301}"
  pool_id   = proxmox_virtual_environment_pool.swarm_pool.id

  cpu {
    cores = 4
    type  = "host"
  }

  memory {
    dedicated = 8192
    floating  = 8192
  }

  agent {
    # read 'Qemu guest agent' section, change to true only when ready
    enabled = true
  }

  startup {
    order      = "2"
    up_delay   = "60"
    down_delay = "60"
  }

  disk {
    datastore_id = var.proxmox_vm_storage
    file_id      = var.cloud_image_id
    interface    = "virtio0"
    discard      = "on"
    size         = 32
    file_format  = "qcow2"
  }

  # scsi on these guys for hotplugging and resizing
  dynamic "disk" {
    for_each = { for idx, val in proxmox_virtual_environment_vm.swarm_worker_dummy[count.index].disk : idx => val }
    iterator = data_disk
    content {
      datastore_id      = data_disk.value["datastore_id"]
      path_in_datastore = data_disk.value["path_in_datastore"]
      file_format       = data_disk.value["file_format"]
      size              = data_disk.value["size"]
      # assign from scsi1 and up
      interface = "scsi${data_disk.key + 1}"
    }
  }

  initialization {
    ip_config {
      ipv4 {
        # x.x.x.40 - x.x.x.55
        address = "${cidrhost(var.subnet_cidr, count.index + 40)}/24"
        gateway = var.gateway
      }
    }
    datastore_id = var.proxmox_image_storage

    user_data_file_id = proxmox_virtual_environment_file.swarm_worker[count.index].id
  }

  network_device {
    bridge = "vmbr0"
  }

  operating_system {
    type = "l26"
  }

  lifecycle {
  }
}

resource "ansible_host" "swarm_manager" {
  count  = var.manager_count
  name   = "${local.managers[count.index].name}.local"
  groups = ["${var.swarm_name}_manager", var.swarm_name]
  variables = {
    ipv4_address = proxmox_virtual_environment_vm.swarm_manager[count.index].ipv4_addresses[1][0]
  }
}

resource "ansible_host" "swarm_worker" {
  count  = var.worker_count
  name   = "${local.workers[count.index].name}.local"
  groups = ["${var.swarm_name}_worker", var.swarm_name]
  variables = {
    ipv4_address = proxmox_virtual_environment_vm.swarm_worker[count.index].ipv4_addresses[1][0]
  }
}
13
proxmox/tf/modules/docker-swarm/providers.tf
Normal file
@ -0,0 +1,13 @@
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.70.0"
    }
    ansible = {
      source  = "ansible/ansible"
      version = "1.3.0"
    }
  }
}
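This file only pins provider versions; the Proxmox API connection itself is configured once in the root module and inherited by the module. A minimal sketch of that root-level block, assuming the endpoint and token are passed in as variables matching the keys in `credentials.auto.tfvars`:

```
provider "proxmox" {
  # Illustrative root-module variables populated from credentials.auto.tfvars
  endpoint  = var.proxmox_api_endpoint
  api_token = var.proxmox_api_token
}
```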
55
proxmox/tf/modules/docker-swarm/variables.tf
Normal file
@ -0,0 +1,55 @@
variable "proxmox_image_storage" {
  type = string
}

variable "proxmox_vm_storage" {
  type = string
}

variable "vm_id_prefix" {
  type        = number
  description = "Prefix for the vm ids in the cluster"
}

variable "gateway" {
  type    = string
  default = "10.0.0.1"
}

variable "swarm_name" {
  type = string
}

variable "manager_count" {
  type = number
  validation {
    condition     = var.manager_count <= 8
    error_message = "Too many manager nodes"
  }
}

variable "worker_count" {
  type = number
  validation {
    condition     = var.worker_count <= 16
    error_message = "Too many worker nodes"
  }
}

variable "storage_size" {
  type = number
}

variable "common_cloud_init" {
  type        = string
  description = "Base cloud-init template"
}

variable "cloud_image_id" {
  type        = string
  description = "Cloud image to use"
}

variable "subnet_cidr" {
  type = string
  # example "10.0.185.0/24"
}
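The variables above are the module's full interface. A hypothetical root-module invocation might look like the following; every value is purely illustrative:

```
module "stingray_swarm" {
  source = "./modules/docker-swarm"

  swarm_name    = "stingray"
  manager_count = 3
  worker_count  = 2
  vm_id_prefix  = 8 # worker 1 becomes VM ID 8301, per the offsets in main.tf
  subnet_cidr   = "10.0.185.0/24"
  gateway       = "10.0.185.1"
  storage_size  = 64

  cloud_image_id        = var.cloud_image_id
  common_cloud_init     = var.common_cloud_init
  proxmox_image_storage = var.proxmox_image_storage
  proxmox_vm_storage    = var.proxmox_vm_storage
}
```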
345
proxmox/tf/modules/k8s/main.tf
Normal file
@ -0,0 +1,345 @@
locals {
  control_plane = [
    for i in range(var.control_plane_count) : {
      name = "${var.cluster_name}-cp-${format("%02s", i + 1)}"
    }
  ]
  workers = [
    for i in range(var.worker_count) : {
      name = "${var.cluster_name}-worker-${format("%02s", i + 1)}"
    }
  ]
  storage_workers = [
    for i in range(var.storage_worker_count) : {
      name = "${var.cluster_name}-storage-worker-${format("%02s", i + 1)}"
    }
  ]
  storage_dummies = [
    for i in range(var.storage_worker_count) : {
      name = "${var.cluster_name}-storage-worker-${format("%02s", i + 1)}-dummy"
    }
  ]
}

resource "proxmox_virtual_environment_pool" "k8s_pool" {
  comment = "Managed by Terraform"
  pool_id = var.cluster_name
}

resource "proxmox_virtual_environment_vm" "k8s_control_plane" {
  count       = var.control_plane_count
  name        = local.control_plane[count.index].name
  description = "Managed by Terraform"
  tags        = ["terraform", "ubuntu", "k8s-cp", var.cluster_name]

  node_name = "pve"
  vm_id     = "${var.vm_id_prefix}${count.index + 101}"
  pool_id   = proxmox_virtual_environment_pool.k8s_pool.id

  started = var.started

  cpu {
    cores = 2
    type  = "host"
  }

  memory {
    dedicated = 2048
    floating  = 2048
  }

  agent {
    # read 'Qemu guest agent' section, change to true only when ready
    enabled = true
  }

  startup {
    order      = "3"
    up_delay   = "60"
    down_delay = "60"
  }

  disk {
    datastore_id = var.proxmox_vm_storage
    file_id      = var.cloud_image_id
    interface    = "virtio0"
    iothread     = true
    discard      = "on"
    size         = 16
    file_format  = "raw"
  }

  initialization {
    ip_config {
      ipv4 {
        # x.x.x.32 - x.x.x.47
        address = "${cidrhost(var.subnet_cidr, count.index + 32)}/24"
        gateway = var.gateway
      }
    }
    datastore_id = var.proxmox_image_storage

    user_data_file_id = proxmox_virtual_environment_file.k8s_control_plane[count.index].id
  }

  network_device {
    bridge = "vmbr0"
  }

  operating_system {
    type = "l26"
  }

  lifecycle {
  }
}

resource "proxmox_virtual_environment_vm" "k8s_worker" {
  count       = var.worker_count
  name        = local.workers[count.index].name
  description = "Managed by Terraform"
  tags        = ["terraform", "ubuntu", "k8s-node", var.cluster_name]

  node_name = "pve"
  vm_id     = "${var.vm_id_prefix}${count.index + 201}"
  pool_id   = proxmox_virtual_environment_pool.k8s_pool.id

  started = var.started

  cpu {
    cores = 4
    type  = "host"
  }

  memory {
    dedicated = 8192
    floating  = 8192
  }

  agent {
    # read 'Qemu guest agent' section, change to true only when ready
    enabled = true
  }

  startup {
    order      = "4"
    up_delay   = "60"
    down_delay = "60"
  }

  disk {
    datastore_id = var.proxmox_vm_storage
    file_id      = var.cloud_image_id
    interface    = "virtio0"
    iothread     = true
    discard      = "on"
    size         = 32
    file_format  = "raw"
  }

  initialization {
    ip_config {
      ipv4 {
        # x.x.x.48 - x.x.x.79
        address = "${cidrhost(var.subnet_cidr, count.index + 48)}/24"
        gateway = var.gateway
      }
    }
    datastore_id = var.proxmox_image_storage

    user_data_file_id = proxmox_virtual_environment_file.k8s_worker[count.index].id
  }

  network_device {
    bridge = "vmbr0"
  }

  operating_system {
    type = "l26"
  }

  lifecycle {
  }
}

resource "proxmox_virtual_environment_file" "k8s_control_plane" {
  count        = var.control_plane_count
  content_type = "snippets"
  datastore_id = var.proxmox_image_storage
  node_name    = "pve"

  source_raw {
    data = <<EOF
${var.common_cloud_init}

hostname: ${local.control_plane[count.index].name}
EOF
    file_name = "${local.control_plane[count.index].name}.cloud-config.yaml"
  }
}

resource "proxmox_virtual_environment_file" "k8s_worker" {
  count        = var.worker_count
  content_type = "snippets"
  datastore_id = var.proxmox_image_storage
  node_name    = "pve"

  source_raw {
    data = <<EOF
${var.common_cloud_init}

hostname: ${local.workers[count.index].name}
EOF
    file_name = "${local.workers[count.index].name}.cloud-config.yaml"
  }
}

resource "proxmox_virtual_environment_file" "k8s_storage_worker" {
  count        = var.storage_worker_count
  content_type = "snippets"
  datastore_id = var.proxmox_image_storage
  node_name    = "pve"

  source_raw {
    data = <<EOF
${var.common_cloud_init}

hostname: ${local.storage_workers[count.index].name}
EOF
    file_name = "${local.storage_workers[count.index].name}.cloud-config.yaml"
  }
}

# This is currently how we create "disks" that are independent of a VM: by
# creating a dummy VM with a disk and then attaching the disk. This way, we
# can destroy the real VM without destroying the disk.
# https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_vm#example-attached-disks
resource "proxmox_virtual_environment_vm" "k8s_storage_dummy" {
  count       = var.storage_worker_count
  name        = local.storage_dummies[count.index].name
  description = "Managed by Terraform"
  tags        = ["terraform", "disk-dummy", var.cluster_name]

  node_name = "pve"
  vm_id     = "${var.vm_id_prefix}${count.index + 401}"
  pool_id   = proxmox_virtual_environment_pool.k8s_pool.id

  started = false
  on_boot = false

  disk {
    datastore_id = var.proxmox_vm_storage
    file_format  = "qcow2"
    interface    = "scsi0"
    size         = var.storage_size
  }
}

resource "proxmox_virtual_environment_vm" "k8s_storage_worker" {
  count       = var.storage_worker_count
  name        = local.storage_workers[count.index].name
  description = "Managed by Terraform"
  tags        = ["terraform", "ubuntu", "k8s-node", var.cluster_name]

  node_name = "pve"
  vm_id     = "${var.vm_id_prefix}${count.index + 301}"
  pool_id   = proxmox_virtual_environment_pool.k8s_pool.id

  started = var.started

  cpu {
    cores = 4
    type  = "host"
  }

  memory {
    dedicated = 8192
    floating  = 8192
  }

  agent {
    # read 'Qemu guest agent' section, change to true only when ready
    enabled = true
  }

  startup {
    order      = "4"
    up_delay   = "60"
    down_delay = "60"
  }

  disk {
    datastore_id = var.proxmox_vm_storage
    file_id      = var.cloud_image_id
    interface    = "scsi0"
    discard      = "on"
    size         = 32
    file_format  = "qcow2"
  }

  # scsi on these guys for hotplugging and resizing
  dynamic "disk" {
    for_each = { for idx, val in proxmox_virtual_environment_vm.k8s_storage_dummy[count.index].disk : idx => val }
    iterator = data_disk
    content {
      datastore_id      = data_disk.value["datastore_id"]
      path_in_datastore = data_disk.value["path_in_datastore"]
      file_format       = data_disk.value["file_format"]
      size              = data_disk.value["size"]
      # assign from scsi1 and up
      interface = "scsi${data_disk.key + 1}"
    }
  }

  initialization {
    ip_config {
      ipv4 {
        # x.x.x.80 - x.x.x.87
        address = "${cidrhost(var.subnet_cidr, count.index + 80)}/24"
        gateway = var.gateway
      }
    }
    datastore_id = var.proxmox_image_storage

    user_data_file_id = proxmox_virtual_environment_file.k8s_storage_worker[count.index].id
  }

  network_device {
    bridge = "vmbr0"
  }

  operating_system {
    type = "l26"
  }

  lifecycle {
  }
}

resource "ansible_host" "k8s_control_plane" {
  count = var.control_plane_count
  # Use mDNS rather than IP
  name   = "${local.control_plane[count.index].name}.local"
  groups = ["${var.cluster_name}_master", var.cluster_name]
}

resource "ansible_host" "k8s_worker" {
  count = var.worker_count
  # Use mDNS rather than IP
  name   = "${local.workers[count.index].name}.local"
  groups = ["${var.cluster_name}_node", var.cluster_name]
}

resource "ansible_host" "k8s_storage_worker" {
  count = var.storage_worker_count
  # Use mDNS rather than IP
  name   = "${local.storage_workers[count.index].name}.local"
  groups = ["${var.cluster_name}_node", "${var.cluster_name}_storage", var.cluster_name]
}

resource "ansible_group" "cluster" {
  name     = "${var.cluster_name}_k3s_cluster"
  children = ["${var.cluster_name}_master", "${var.cluster_name}_node"]
}
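The k8s module's variables.tf is not included in the truncated diff, so the following invocation sketch infers variable names from their usage in main.tf above; treat every name and value as an assumption:

```
module "dolo_k8s" {
  source = "./modules/k8s"

  cluster_name         = "dolo"
  control_plane_count  = 3
  worker_count         = 3
  storage_worker_count = 2
  started              = true
  vm_id_prefix         = 9 # control plane 1 becomes VM ID 9101
  subnet_cidr          = "10.0.185.0/24"
  gateway              = "10.0.185.1"
  storage_size         = 64

  cloud_image_id        = var.cloud_image_id
  common_cloud_init     = var.common_cloud_init
  proxmox_image_storage = var.proxmox_image_storage
  proxmox_vm_storage    = var.proxmox_vm_storage
}
```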
13
proxmox/tf/modules/k8s/providers.tf
Normal file
@ -0,0 +1,13 @@
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.70.0"
    }
    ansible = {
      source  = "ansible/ansible"
      version = "1.3.0"
    }
  }
}
Some files were not shown because too many files have changed in this diff.