$re = '/\s*registry:\s*(?<registryUrl>.*)\n\s+repository:\s+(?<depName>.*)\n\s+tag:\s\"?(?<currentValue>[^\"\n]+)\"?\s*\n/m';
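// The regex above captures container image coordinates written as consecutive
// "registry:" / "repository:" / "tag:" lines; the capture group names
// (registryUrl, depName, currentValue) follow the naming conventions of Renovate
// custom regex managers. As a hypothetical illustration (values not taken from
// the test string below), it would match a block such as:
//
//     registry: docker.io
//     repository: library/nginx
//     tag: "1.27.0"
//
// note that the repository and tag lines must be preceded by whitespace
// (indentation) for the \s+ parts of the pattern to match.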
$str = '# Default values for sylva-units.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# generic helm chart release name overrides
nameOverride: ""
fullnameOverride: ""
git_repo_spec_default:
interval: 168h
timeout: 5m
oci_repo_spec_default:
interval: 168h
timeout: 5m
source_templates: # templates to generate Flux GitRepository/OCIRepository resources
# <repo-name>:
# kind: GitRepository/OCIRepository
# spec: # partial spec for a Flux resource
# url: https://gitlab.com/sylva-projects/sylva-core.git
# #secretRef: # is autogenerated based on \'auth\'
# ref: # can be overridden per-unit, with \'ref_override\'
# branch: main
# auth: # optional \'username\'/\'password\' dict containing git authentication information
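# for illustration, an OCIRepository-based entry would follow the same shape
# (hypothetical example, no such template is defined in this file):
#   example-oci-charts:
#     kind: OCIRepository
#     spec:
#       url: oci://registry.example.org/sylva/charts
#       ref:
#         tag: 1.0.0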
sylva-core:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-core.git
ref:
# the sylva-core framework will always override this ref so that the
# currently checked out commit of sylva-core is used by sylva-units
# (you can grep the code for "CURRENT_COMMIT" to find out how)
commit: not-a-real-commit
weave-gitops:
kind: GitRepository
spec:
url: https://github.com/weaveworks/weave-gitops.git
ref:
tag: v0.38.0
sylva-capi-cluster:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-capi-cluster.git
ref:
tag: 0.12.4
sync-openstack-images:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sync-openstack-images.git
ref:
tag: 0.7.0
devnull:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/devnull.git
ref:
tag: 0.1.0
local-path-provisioner:
kind: GitRepository
spec:
url: https://github.com/rancher/local-path-provisioner.git
ref:
tag: v0.0.32
sriov-resources:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sriov-resources.git
ref:
tag: 0.0.5
metallb-resources:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/metallb-resources.git
ref:
tag: 0.2.1
os-image-server:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/os-image-server.git
ref:
tag: 2.6.0
capo-contrail-bgpaas:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/capo-contrail-bgpaas.git
ref:
tag: 1.3.0
libvirt-metal:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/container-images/libvirt-metal.git
ref:
tag: 0.3.0
vault-operator:
kind: GitRepository
spec:
url: https://github.com/bank-vaults/vault-operator.git
ref:
tag: v1.23.0
sylva-dashboards:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-dashboards.git
ref:
tag: 0.2.0
minio-operator:
kind: GitRepository
spec:
url: https://github.com/minio/operator.git
ref:
tag: v7.1.1
sylva-snmp-resources:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-snmp-resources.git
ref:
tag: 0.2.0
sylva-logging-flows:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-logging-flows.git
ref:
tag: 0.2.0
sylva-prometheus-rules:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-prometheus-rules.git
ref:
tag: 0.2.1
sylva-thanos-rules:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-thanos-rules.git
ref:
tag: 0.3.0
sylva-alertmanager-resources:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-alertmanager-resources.git
ref:
tag: 0.2.0
workload-team-defs:
kind: GitRepository
spec:
url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/workload-team-defs.git
ref:
tag: 0.4.0
bitnami-postgresql:
kind: GitRepository
spec:
url: https://github.com/bitnami/charts.git
ref:
tag: postgresql/15.5.36
bitnami-postgresql-ha:
kind: GitRepository
spec:
url: https://github.com/bitnami/charts.git
ref:
tag: postgresql-ha/14.2.30
bitnami-thanos:
kind: GitRepository
spec:
url: https://github.com/bitnami/charts.git
ref:
tag: thanos/15.8.0
bitnami-redis-cluster:
kind: GitRepository
spec:
url: https://github.com/bitnami/charts.git
ref:
tag: redis-cluster/11.0.8
logging-chart-repository:
kind: GitRepository
spec:
url: https://github.com/kube-logging/logging-operator.git
ref:
tag: 6.0.2
helm_repo_spec_default:
interval: 168h
timeout: 5m
# this defines the default .spec for a Kustomization resource
# generated for each item of \'units\'
unit_kustomization_spec_default: # default .spec for a Kustomization
force: false
prune: true
interval: 24h
retryInterval: 1m
timeout: 30s
# this defines the default .spec for a HelmRelease resource
# generated for a unit with a "helmrelease_spec" field
unit_helmrelease_spec_default: # default for the .spec of a HelmRelease
driftDetection:
mode: enabled
interval: 24h
# the following dependsOn is used to prevent periodic reconciliation during upgrades:
# since the root-dependency-<n> HelmRelease is pruned when root-dependency-<n+1> is
# reconciled, HelmRelease reconciliation for a unit X is blocked until the Kustomization
# controlling X reconciles and updates the dependsOn to refer to the root-dependency-<n+1> HelmRelease
#
# (see also unit.root-dependency below)
dependsOn:
- >-
{{ tuple (dict "name" (printf "root-dependency-%d" .Release.Revision))
(tuple . "root-dependency" | include "unit-enabled")
| include "set-only-if"
}}
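# (illustratively, with the root-dependency unit enabled and at Helm release revision 3,
# the entry above resolves to a single dependency on the root-dependency-3 HelmRelease;
# when the root-dependency unit is disabled, the set-only-if helper drops the entry)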
upgrade:
crds: CreateReplace
maxHistory: 2
# this defines the default .spec for a Kustomization resource containing the HelmRelease resource
# generated by a unit with a "helmrelease_spec" field
unit_helmrelease_kustomization_spec_default:
path: ./kustomize-units/helmrelease-generic
sourceRef:
name: sylva-core
kind: \'{{ index .Values.source_templates "sylva-core" | dig "kind" "GitRepository" }}\'
wait: true
# default value used if units.xxx.enabled is not specified
units_enabled_default: false
# unit templates define unit settings
# a unit can inherit from multiple of them
unit_templates:
sylva-units: {} # Empty template that will be overwritten in use-oci-artifacts.values.yaml
# the dummy unit template is used to provide a Kustomization
# that only serves to carry dependencies
dummy:
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/dummy/base
wait: false
namespace-defs:
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/namespace-defs
wait: true
prune: false
_components:
- \'{{ tuple "components/cinder-csi" (tuple . "cinder-csi" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/metal3" (tuple . "metal3" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/longhorn" (tuple . "longhorn" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/harbor" (tuple . "harbor" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/rancher" (tuple . "rancher" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/sriov" (tuple . "sriov-network-operator" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/cattle-monitoring" (tuple . "monitoring" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/cattle-fleet" (.Values._internal | dig "sylva_mgmt_enabled_units" "rancher" false) | include "set-only-if" }}\' # effective only in workload clusters
- \'{{ tuple "components/ceph-csi-cephfs" (tuple . "ceph-csi-cephfs" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/gitea" (tuple . "gitea" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/minio-operator" (tuple . "minio-operator" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/minio-monitoring" (tuple . "minio-monitoring" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/minio-logging" (tuple . "minio-logging" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/thanos" (tuple . "thanos" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/loki" (tuple . "loki" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/kepler" (tuple . "kepler" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/kube-system" (list "cabpr" "cabpck" | has .Values.cluster.capi_providers.bootstrap_provider) | include "set-only-if" }}\'
- \'{{ tuple "components/neuvector" (tuple . "neuvector" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/kunai" (tuple . "kunai" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/sbom-operator" (tuple . "sbom-operator" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/velero" (tuple . "velero" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/rancher-turtles" (tuple . "rancher-turtles" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/kube-logging" (tuple . "logging" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/trivy-operator" (tuple . "trivy-operator" | include "unit-enabled") | include "set-only-if" }}\'
flux-common:
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/flux-system/in-cluster
targetNamespace: flux-system
wait: true
# prevent Flux from uninstalling itself
prune: false
_components:
- \'{{ tuple "../components/extra-ca" .Values.oci_registry_extra_ca_certs | include "set-only-if" }}\'
postBuild:
substitute:
var_substitution_enabled: "true" # To force substitution when configmap does not exist
EXTRA_CA_CERTS: \'{{ tuple (.Values.oci_registry_extra_ca_certs | default "" | b64enc) .Values.oci_registry_extra_ca_certs | include "set-only-if" }}\'
KUSTOMIZE_CONCURRENT: \'{{ .Values.flux.kustomize.concurrent }}\'
KUSTOMIZE_LOG_LEVEL: \'{{ .Values.flux.kustomize.log_level }}\'
KUSTOMIZE_FEATURES_GATES: \'{{ tuple .Values.flux.kustomize.features_gates | include "dict_to_key_values_separated_string" }}\'
HELM_CONCURRENT: \'{{ .Values.flux.helm.concurrent }}\'
HELM_LOG_LEVEL: \'{{ .Values.flux.helm.log_level }}\'
HELM_FEATURES_GATES: \'{{ tuple .Values.flux.helm.features_gates | include "dict_to_key_values_separated_string" }}\'
SOURCE_CONCURRENT: \'{{ .Values.flux.source.concurrent }}\'
SOURCE_LOG_LEVEL: \'{{ .Values.flux.source.log_level }}\'
SOURCE_FEATURES_GATES: \'{{ tuple .Values.flux.source.features_gates | include "dict_to_key_values_separated_string" }}\'
SOURCE_STORAGE_CLASS: >-
{{ tuple . (tuple . "longhorn" | include "unit-enabled") "single-replica-storageclass" .Values._internal.default_storage_class | include "interpret-ternary" }}
SOURCE_STORAGE_SIZE: 1Gi
substituteFrom:
- kind: ConfigMap
name: proxy-env-vars
optional: true
# this unit template gathers depends_on entries common
# to most units
base-deps:
# the actual dependencies are not the same for bootstrap/management and workload clusters;
# the actual content is found in values files specific to each
# the entry below ensures that units are not updated in parallel
# with a CAPI node creation and/or rolling update:
# - the units that the \'cluster\' unit depends on, directly or indirectly, will reconcile first
# - then the \'cluster\' unit will reconcile
# - then units on which the \'cluster\' unit does not depend, directly or indirectly, will
# reconcile after the "cluster-machines-ready" unit is ready
depends_on:
cluster-machines-ready: >-
{{
and (tuple . "cluster-machines-ready" | include "unit-enabled")
(not (.Values._internal.cluster_machines_ready_unit_deps | has .Values._unit_name_))
}}
# this unit template is used to define a unit inheriting from
# kustomize-units/kube-job
kube-job:
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kube-job
wait: true
force: true
postBuild:
substitute:
JOB_NAME: \'{{ .Values._kustomization_name_ }}\'
NAMESPACE: \'{{ .Release.Namespace }}\'
JOB_CHECKSUM: \'{{ list .Values .Values._internal.sylva_core_version | toJson | sha256sum }}\' # can be overridden to use something more specific
_patches:
# the transformations below are necessary to change the ConfigMap name/namespace
# because, to avoid interference between shell syntax and envsubst, we explicitly
# disable envsubst substitution in this resource (kustomize.toolkit.fluxcd.io/substitute: disabled)
- target:
kind: ConfigMap
labelSelector: kube-job=script
patch: |
- op: replace
path: /metadata/namespace
value: {{ .Values.units | dig .Values._unit_name_ "kustomization_spec" "postBuild" "substitute" "NAMESPACE" .Release.Namespace }}
- op: replace
path: /metadata/name
value: kube-job-{{ .Values._kustomization_name_ }}
- target:
kind: Job
patch: |
kind: Job
metadata:
name: _unused_
spec:
template:
spec:
volumes:
- name: script-volume
configMap:
name: kube-job-{{ .Values._kustomization_name_ }}
kube-cronjob:
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kube-cronjob
wait: true
force: true
postBuild:
substitute:
CRONJOB_NAME: \'{{ .Values._kustomization_name_ }}\'
NAMESPACE: \'{{ .Release.Namespace }}\'
JOB_CHECKSUM: \'{{ list .Values .Values._internal.sylva_core_version | toJson | sha256sum }}\' # can be overridden to use something more specific
_patches:
# the transformations below are necessary to change the ConfigMap name/namespace
# because, to avoid interference between shell syntax and envsubst, we explicitly
# disable envsubst substitution in this resource (kustomize.toolkit.fluxcd.io/substitute: disabled)
- target:
kind: ConfigMap
labelSelector: kube-cronjob=script
patch: |
- op: replace
path: /metadata/namespace
value: {{ .Values.units | dig .Values._unit_name_ "kustomization_spec" "postBuild" "substitute" "NAMESPACE" .Release.Namespace }}
- op: replace
path: /metadata/name
value: kube-cronjob-{{ .Values._kustomization_name_ }}
- target:
kind: CronJob
patch: |
kind: CronJob
metadata:
name: _unused_
spec:
jobTemplate:
spec:
template:
spec:
volumes:
- name: script-volume
configMap:
name: kube-cronjob-{{ .Values._kustomization_name_ }}
backup-s3:
repo: sylva-core
kustomization_substitute_secrets:
S3_HOST: \'{{ (.Values.backup | default dict) | dig "store" "s3" "host" "" | b64enc }}\'
S3_BUCKET: \'{{ (.Values.backup | default dict) | dig "store" "s3" "bucket" "" | b64enc }}\'
S3_REGION: \'{{ (.Values.backup | default dict) | dig "store" "s3" "region" "" | b64enc }}\'
S3_ACCESS_KEY: \'{{ (.Values.backup | default dict) | dig "store" "s3" "access_key" "" | b64enc }}\'
S3_SECRET_KEY: \'{{ (.Values.backup | default dict) | dig "store" "s3" "secret_key" "" | b64enc }}\'
S3_CERT: \'{{ (.Values.backup | default dict) | dig "store" "s3" "cert" "" }}\'
kustomization_spec:
postBuild:
substitute:
NAMESPACE: \'{{ .Release.Namespace }}\'
_components:
- "./components/backup-to-s3"
- \'{{ tuple "./components/backup-to-s3/cert" (hasKey ((.Values.backup | default dict) | dig "store" "s3" dict) "cert") | include "set-only-if" }}\'
vault-template:
depends_on:
vault-init: true
ingress-nginx: \'{{ tuple . "ingress-nginx" | include "unit-enabled" }}\'
\'{{ .Values._internal.default_storage_class_unit }}\': true
vault-operator: true
sylva-ca: true
external-secrets-operator: true
annotations:
sylvactl/readyMessage: "Vault UI can be reached at https://{{ .Values.external_hostnames.vault }} ({{ .Values._internal.display_external_ip_msg }})"
sylvactl/unitTimeout: "{{ printf \\"%dm\\" (mul 5 .Values.cluster.control_plane_replicas) }}"
repo: sylva-core
kustomization_substitute_secrets:
ADMIN_PASSWORD: \'{{ .Values.admin_password }}\'
KEY: \'{{ .Values.external_certificates.vault.key | default "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/vault
_patches:
# Create a list of ACL and read roles, one per team expected to deploy workload clusters. The loop over .Values.workload_clusters.teams is done in the Helm template
- target:
group: vault.banzaicloud.com
version: v1alpha1
kind: Vault
name: vault
# An empty patch is not allowed, so when there are no workload cluster teams we emit a dummy patch (a test op which is always true)
patch: |
{{- if .Values.workload_clusters.teams -}}
{{- range $name, $_ := .Values.workload_clusters.teams }}
- op: add
path: /spec/externalConfig/policies/-
value:
name: secret-reader-{{ $name }}
rules: |
path "secret/data/{{ $name }}/*" {
capabilities = [ "read", "list" ]
}
- op: add
path: /spec/externalConfig/policies/-
value:
name: secret-rw-{{ $name }}
rules: |
path "secret/data/{{ $name }}/*" {
capabilities = [ "create", "update", "delete", "read", "list" ]
}
- op: add
path: /spec/externalConfig/auth/0/roles/-
value:
name: secret-reader-{{ $name }}
bound_service_account_names: [secretstore-access-{{ $name }}]
bound_service_account_namespaces: ["{{ $name }}"]
policies: [ "secret-reader-{{ $name }}"]
ttl: 1h
{{- end}}
{{- else -}}
- op: test
path: /spec/externalConfig/secrets/0/type
value: kv
{{- end -}}
postBuild:
substitute:
VAULT_DNS: \'{{ .Values.external_hostnames.vault }}\'
VAULT_REPLICAS: \'{{ .Values._internal.vault_replicas }}\'
MAX_POD_UNAVAILABLE: \'{{ int .Values._internal.vault_replicas | eq 1 | ternary 0 1 }}\'
AFFINITY: \'{{ and (eq (int .Values.cluster.control_plane_replicas) 1) (ne .Values._internal.default_storage_class_unit "local-path") | ternary (.Values._internal.vault_no_affinity | toJson | indent 12 ) (.Values._internal.vault_affinity | toJson| indent 12) }}\'
SERVICE: vault
SERVICE_DNS: \'{{ .Values.external_hostnames.vault }}\'
CERT: \'{{ .Values.external_certificates.vault.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\' # CA of the certificate injected to the local Vault
K8S_AUTH_PATH: \'{{ .Values.security.vault.paths.k8s }}\'
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
_components:
# generate certificate for external communication
- \'{{ ternary "../tls-components/tls-secret" "../tls-components/tls-certificate" (hasKey .Values.external_certificates.vault "cert") }}\'
- "../tls-components/sylva-ca"
healthChecks: # sometimes this kustomization seems correctly applied while the vault pod is not running, see https://gitlab.com/sylva-projects/sylva-core/-/issues/250
# so we replace wait: true with explicit health checks on the Vault components
- apiVersion: apps/v1
kind: StatefulSet
name: vault
namespace: vault
- apiVersion: apps/v1
kind: Deployment
name: vault-operator
namespace: vault
- apiVersion: v1
kind: Service
name: vault-operator
namespace: vault
- apiVersion: v1
kind: Service
name: vault
namespace: vault
- apiVersion: v1
kind: Service
name: vault-configurer
namespace: vault
- apiVersion: apps/v1
kind: Deployment
name: vault-configurer
namespace: vault
# unit_definition_defaults is merged with each "units.x" before
# it inherits from unit templates
unit_definition_defaults:
# the root-dependency wait job will wait on resources having this label
labels:
sylva-units/root-dependency-wait: ""
annotations:
sylva-units-helm-revision: \'{{ .Release.Revision }}\'
depends_on:
root-dependency: >-
{{
and
(tuple . "root-dependency" | include "unit-enabled")
(not (.Values._unit_name_ | eq "root-dependency"))
}}
# this defines Flux Kustomizations and the related ConfigMaps and Secrets
# for Helm-based units, the Kustomization will produce a Flux HelmRelease
units:
# <unit-name>:
# info: # unit metadata, mainly for documentation purposes
# description: <short unit description>
# details: <more detailed data about unit purpose and usage>
# maturity: <level of integration of the corresponding component in the sylva stack>
# internal: true <for units fully defined in sylva-core/kustomize-units without relying on external resources>
# version: <force declarative version, not recommended>
# enabled: boolean or GoTPL
# repo: <name of the repo under \'source_templates\'> (for use with kustomization_spec)
# helm_repo_url: URL of the Helm repo to use (for use with helmrelease_spec, but not mandatory, \'repo\' can be used as well to use a git repo)
# labels: (optional) dict holding labels to add to the resources for this unit
# ref_override: optional, if defined, this dict will be used for the GitRepository or OCIRepository overriding spec.ref (not used if some helm_repo_* is set)
# depends_on: dict defining the dependencies of this unit, keys are unit names, values are booleans
# (these dependencies are injected in the unit Kustomization via \'spec.dependsOn\')
# suspend: true/false # (optional) if set to true the spec.suspend will be set on the main Flux resource for the unit
# # ie. the Kustomization for a pure-kustomize unit, or the HelmRelease for a Helm-based unit
# helmrelease_spec: # optional, contains a partial spec for a FluxCD HelmRelease, all the
# # key things are generated from unit_helmrelease_spec_default
# # and from other fields in the unit definition
# _postRenderers: # this field can be used in this file, it will be merged into user-provided \'postRenderers\'
# helm_chart_artifact_name: optional, if specified, when deploying the Helm chart from an OCI artifact,
# helm_chart_artifact_name will be used as the chart name instead of the last path item of helmrelease_spec.chart.spec.chart
# this is required if helmrelease_spec.chart.spec.chart is empty, \'.\' or \'/\'
# (also used by tools/oci/push-helm-chart to generate the artifact)
# helm_chart_versions: optional, if specified, when deploying the Helm chart from an OCI artifact or Helm registry,
# it will drive the version to be used from a dict of <version>:<boolean>
# if helmrelease_spec.chart.spec.version is not set
# (also used by tools/oci/push-helm-charts-artifacts.py to generate the artifact)
# kustomization_spec: # contains a partial spec for a FluxCD Kustomization, most of the
# # things are generated from unit_kustomization_spec_default
# # sourceRef is generated from .git_repo field
# path: ./path-to-unit-under-repo
# # the final path will hence be:
# # - <git repo template>.spec.url + <unit>.spec.path (if <git repo template> has spec.url defined)
# _patches: # this field can be used in this file, it will be merged into user-provided \'patches\'
# _components: # this field can be used in this file, it will be merged into user-provided \'components\'
#
# helm_secret_values: # (dict), if set what is put here is injected in HelmRelease.valuesFrom as a Secret
# kustomization_substitute_secrets: # (dict), if set what is put here is injected in Kustomization.postBuild.substituteFrom as a Secret
# unit_templates: optional, list of names of "unit templates"
# unit templates are defined under "unit_templates"
# the settings for the unit are inherited from the corresponding entries under .Values.unit_templates,
# merging them in the specified order
# one_shot: true/false (default: false)
# This is used for units that need to run only once on a given cluster.
# A unit having "one_shot: true" will be automatically disabled if it has already
# run once on an application of sylva-units that ran to completion (until reconciliation
# of sylva-units-status).
# It is typically used for kube-job units:
# * that are needed to apply a given change for a sylva-upgrade
# * or that need to run at cluster creation time, but not after (e.g. for metallb
# transitioning from RKE2-managed to Flux-managed)
# NOTE WELL: there is *no* guarantee that a one_shot unit will never be replayed (it will be replayed on a re-run after
# a sylva-units update that didn\'t complete)
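# As an illustration of the schema documented above, a minimal Helm-based unit
# could look like the following (hypothetical example, not an actual unit of this chart):
#
#   my-example-unit:
#     info:
#       description: illustrative Helm-based unit
#     enabled: false
#     unit_templates:
#       - base-deps
#     helm_repo_url: https://charts.example.org
#     helmrelease_spec:
#       chart:
#         spec:
#           chart: example-chart
#           version: 1.2.3
#       targetNamespace: example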
root-dependency:
info:
description: special unit ensuring ordered updates of all Kustomizations
details: >-
All Kustomizations will depend on this Kustomization, whose name is `root-dependency-<n>`
and changes at each update of the sylva-units Helm release. This Kustomization does not become
ready before all other Kustomizations have been updated.
This unit also manages the `root-dependency-<n>` HelmRelease that acts as a lock to prevent
HelmReleases from reconciling before units they depend on are ready.
All this ensures in a race-free way that during an update, units will be reconciled in an
order matching dependency declarations.
internal: true
# Note:
# for this unit, the names of all resources produced must include .Release.Revision
# or another string that is guaranteed to change for each new sylva-units
# Helm release revision (or else the old and new Kustomization will control the same resource,
# which hence might get deleted when the old Kustomization is deleted), see use of .Values._kustomization_name_ below
#
# This unit is *both* a Helm-based unit (to produce the root-dependency-<n> HelmRelease)
# *AND* a Kustomize-based unit (to produce the kube-job running root-dependency-check.sh)
#
# This makes the definition of this unit wild and exotic.
# Don\'t use this unit to learn sylva-units!
#
unit_templates:
- kube-job
labels:
sylva-units/root-dependency-wait: null # cancel the label set in unit_definition_defaults, because the root-dependency wait job should _not_ wait for this unit
repo: devnull
helm_chart_artifact_name: devnull
helmrelease_spec:
chart:
spec:
chart: .
interval: 168h
kustomization_name: root-dependency-{{ .Release.Revision }} # this makes the Kustomization name dynamic
kustomization_spec:
interval: 168h
# we need to specify the Kustomization path or else we\'d have kustomize-units/kube-job
# inherited via kube-job unit template ...
path: ./kustomize-units/helmrelease-generic
# .. but then we still need to have the kube-job definition which we inject via a component
_components:
- ../kube-job-as-a-component
postBuild:
substitute:
POD_ACTIVE_DEADLINE_SECONDS: "120" # if a single pod run of this Job were to take more time than that, it is probably stuck
# for root-dependency-check.sh kube-job
JOB_CHECKSUM: \'{{ .Release.Revision }}\'
RUNASUSER: \'10000\'
RUNASGROUP: \'10000\'
HELM_VERSION: \'{{ .Release.Revision }}\'
_patches:
# for root-dependency-check.sh kube-job
- \'{{ include "kube-job-add-env-var-patch" (dict "HELM_REVISION" .Release.Revision )}}\'
- \'{{ include "kube-job-replace-script-patch" (.Files.Get "scripts/root-dependency-check.sh") }}\'
# for root-dependency-<n> HelmRelease
- target:
kind: HelmRelease
patch: |-
- op: replace
path: /metadata/name
value: root-dependency-{{ .Release.Revision }}
- op: add
path: /metadata/labels/sylva-units.version
value: "{{ .Release.Revision }}"
- op: remove
path: /spec/dependsOn
validating-admission-policies:
info:
description: configures validating admission policies
internal: true
unit_templates: []
depends_on: {}
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/validating-admission-policies
wait: true
_components:
- \'{{ tuple "components/management-cluster-only" .Values._internal.mgmt_cluster | include "set-only-if" }}\'
- \'{{ tuple "components/deny-cabundle-changes" (tuple . "cert-manager" | include "unit-enabled") | include "set-only-if" }}\'
flux-system:
info:
description: contains Flux definitions *to manage the Flux system itself via gitops*
details: Note that Flux is always installed on the current cluster as a pre-requisite to installing the chart
maturity: core-component
labels:
sylva-units/protected: ""
repo: sylva-core
unit_templates: # we intentionally don\'t inherit from base-deps, because flux is itself part of base dependencies
- flux-common
cert-manager:
info:
description: installs cert-manager, an X.509 certificate controller
maturity: core-component
unit_templates:
- base-deps
helm_repo_url: https://charts.jetstack.io
helmrelease_spec:
chart:
spec:
chart: cert-manager
version: v1.18.2
targetNamespace: cert-manager
install:
createNamespace: true
values:
crds:
enabled: true
http_proxy: \'{{ tuple .Values.proxies.http_proxy (hasKey .Values.security.external_x509_issuer "issuer_type") | include "set-only-if" }}\' # proxy setting is required to reach external cert issuers
https_proxy: \'{{ tuple .Values.proxies.https_proxy (hasKey .Values.security.external_x509_issuer "issuer_type") | include "set-only-if" }}\'
no_proxy: \'{{ tuple .Values.proxies.no_proxy (hasKey .Values.security.external_x509_issuer "issuer_type") | include "set-only-if" }}\'
replicaCount: >-
{{ .Values._internal.ha_cluster.is_ha | ternary 2 1 | include "preserve-type" }}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: controller
topologyKey: kubernetes.io/hostname
podDisruptionBudget:
enabled: true
minAvailable: >-
{{ .Values._internal.ha_cluster.is_ha | ternary 1 0 | include "preserve-type" }}
webhook:
replicaCount: >-
{{ .Values._internal.ha_cluster.is_ha | ternary 3 1 | include "preserve-type" }}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: webhook
topologyKey: kubernetes.io/hostname
podDisruptionBudget:
enabled: true
minAvailable: >-
{{ .Values._internal.ha_cluster.is_ha | ternary 1 0 | include "preserve-type" }}
cainjector:
replicaCount: >-
{{ .Values._internal.ha_cluster.is_ha | ternary 2 1 | include "preserve-type" }}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: cainjector
topologyKey: kubernetes.io/hostname
podDisruptionBudget:
enabled: true
minAvailable: >-
{{ .Values._internal.ha_cluster.is_ha | ternary 1 0 | include "preserve-type" }}
_postRenderers:
- kustomize:
patches:
- \'{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include "preserve-type" }}\'
kube-storage-version-migrator:
enabled: false
info:
description: installs kube-storage-version-migrator to assist apiVersion migrations
maturity: beta
internal: true
unit_templates:
- base-deps
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kube-storage-version-migrator
wait: true
trivy-operator:
info:
description: installs Trivy operator
maturity: beta
enabled: no
unit_templates:
- base-deps
helm_repo_url: https://aquasecurity.github.io/helm-charts/
helmrelease_spec:
chart:
spec:
chart: trivy-operator
version: 0.30.0
targetNamespace: trivy-system
install:
createNamespace: false
values:
image:
registry: docker.io
repository: aquasec/trivy-operator
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsGroup: 10000
runAsUser: 10000
serviceAccount:
annotations: {}
create: true
name: trivy-operator
operator:
builtInTrivyServer: true
httpProxy: \'{{ .Values.proxies.https_proxy }}\'
httpsProxy: \'{{ .Values.proxies.https_proxy }}\'
noProxy: \'{{ printf "%s,%s" (include "sylva-units.no_proxy" (tuple .)) "trivy-service.trivy-system" }}\'
trivy:
image:
registry: ghcr.io
repository: aquasecurity/trivy
httpProxy: \'{{ .Values.proxies.https_proxy }}\'
httpsProxy: \'{{ .Values.proxies.https_proxy }}\'
noProxy: \'{{ printf "%s,%s" (include "sylva-units.no_proxy" (tuple .)) "trivy-service.trivy-system" }}\'
severity: UNKNOWN,HIGH,CRITICAL
dbRegistry: ghcr.io
dbRepository: aquasecurity/trivy-db
javaDbRegistry: ghcr.io
javaDbRepository: aquasecurity/trivy-java-db
insecureRegistries: \'{{ .Values.security.trivy_operator.insecure_registries | include "set-if-defined" }}\'
registry:
mirror: \'{{ .Values.security.trivy_operator.mirrors | include "set-if-defined" }}\'
policiesBundle:
registry: ghcr.io
repository: aquasecurity/trivy-checks
compliance:
specs:
- \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" | ternary "rke2-cis-1.24" "k8s-cis-1.23" }}\'
nodeCollector:
volumes: \'{{ .Values._internal.node_collector_volumes | include "preserve-type" }}\'
volumeMounts: \'{{ .Values._internal.node_collector_volume_mounts | include "preserve-type" }}\'
trivyOperator:
scanJobPodTemplatePodSecurityContext:
runAsGroup: 10000
runAsUser: 10000
scanJobPodTemplateContainerSecurityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsGroup: 10000
runAsUser: 10000
sylva-ca:
info:
description: configures the Certificate Authority for units of the Sylva stack
internal: true
unit_templates:
- base-deps
depends_on:
cert-manager: true
repo: sylva-core
kustomization_substitute_secrets:
VAULT_TOKEN: \'{{ .Values.security.external_x509_issuer.vault_token | default "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/sylva-ca
wait: true
postBuild:
substitute:
CA_CHAIN: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
ISSUER_SERVER: \'{{ .Values.security.external_x509_issuer.server }}\'
VAULT_PATH: \'{{ .Values.security.external_x509_issuer.vault_path }}\'
_patches:
- target:
kind: ClusterSecretStore
name: eso-store-k8s-cert-manager
patch: |
{{- if .Values.workload_clusters.teams -}}
{{- range $name, $_ := .Values.workload_clusters.teams }}
- op: add
path: /spec/conditions/0/namespaces/-
value:
{{ $name }}
{{- end -}}
{{- else -}}
- op: test
path: /kind
value: ClusterSecretStore
{{- end -}}
namespace-defs:
info:
description: creates sylva-system namespace and other namespaces to be used by various units
internal: true
unit_templates:
- base-deps
- namespace-defs
depends_on:
# namespace-defs can\'t depend on Kyverno, because Kyverno depends on
# some namespaces
kyverno: false
kyverno-policies: false
cnpg-operator:
info:
description: Cloud Native PostgreSQL (CNPG) Operator
maturity: stable
enabled_conditions:
- \'{{ or (tuple . "keycloak" | include "unit-enabled")
(tuple . "harbor" | include "unit-enabled")
(tuple . "gitea" | include "unit-enabled")
(tuple . "kunai" | include "unit-enabled")
}}\'
unit_templates:
- base-deps
helm_repo_url: https://cloudnative-pg.github.io/charts
helmrelease_spec:
install:
createNamespace: true
chart:
spec:
chart: cloudnative-pg
version: 0.26.0
targetNamespace: cnpg-system
metrics-server-ha:
info:
description: metrics-server configmap for ha values
maturity: stable
internal: true
enabled_conditions:
- \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" }}\'
- \'{{ .Values._internal.ha_cluster.is_ha }}\'
unit_templates:
- base-deps
repo: sylva-core
kustomization_spec:
targetNamespace: \'{{ .Release.Namespace }}\'
path: ./kustomize-units/metrics-server-ha
wait: true
metrics-server:
info:
description: metrics-server install
maturity: stable
enabled_conditions:
- \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" }}\'
depends_on:
metrics-server-ha: \'{{ .Values._internal.ha_cluster.is_ha }}\'
unit_templates:
- base-deps
helm_repo_url: https://rke2-charts.rancher.io
helm_chart_versions:
3.12.203: >-
{{ include "k8s-version-match" (tuple ">=1.30.0,<1.31.0" .Values._internal.k8s_version) }}
3.13.001: >-
{{ include "k8s-version-match" (tuple ">=1.31.0" .Values._internal.k8s_version) }}
helmrelease_spec:
driftDetection:
ignore:
- target:
kind: APIService
name: v1beta1.metrics.k8s.io
paths:
- /metadata/annotations/meta.helm.sh~1release-name
- /metadata/labels/app.kubernetes.io~1instance
- /metadata/labels/helm.toolkit.fluxcd.io~1name
- /metadata/labels/helm.toolkit.fluxcd.io~1namespace
- /spec/service/name
install:
createNamespace: false
chart:
spec:
chart: rke2-metrics-server
version: ""
targetNamespace: kube-system
values:
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/control-plane: "true"
valuesFrom: >-
{{
tuple (list (dict "kind" "ConfigMap"
"name" "metrics-server-ha-values"))
.Values._internal.ha_cluster.is_ha
| include "set-only-if"
}}
keycloak-postgres:
info:
description: "[Deprecated] Deploy Postgres cluster for Keycloak using Cloud Native PostgreSQL (CNPG)"
details: >
Legacy unit that used to deploy the Postgres cluster for Keycloak using Cloud Native PostgreSQL (CNPG).
This unit will be removed once all deployments have been migrated to the new keycloak-postgresql unit (sylva 1.6+)
maturity: stable
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
# Enable this unit only if required to transition from \'keycloak-postgres\' (historic unit with old settings) to \'keycloak-postgresql\', that is:
# - If \'keycloak-postgres\' unit was already successfully installed before
- \'{{ lookup "kustomize.toolkit.fluxcd.io/v1" "Kustomization" "sylva-system" "keycloak-postgres" | dig "status" "observedGeneration" -1 | ne -1 }}\'
# - Only if the reconciliation of \'keycloak-postgresql\' (new unit) has never completed yet, meaning that the migration is not finished
- \'{{ lookup "kustomize.toolkit.fluxcd.io/v1" "Kustomization" "sylva-system" "keycloak-postgresql" | dig "status" "observedGeneration" -1 | eq -1 }}\'
repo: sylva-core
depends_on:
cnpg-operator: true
keycloak-init: true
annotations:
sylvactl/unitTimeout: 15m
kustomization_spec:
postBuild:
substitute:
KEYCLOAK_POSTGRES_REPLICAS: \'{{ .Values._internal.ha_cluster.is_ha | ternary 3 1 }}\'
storageClass: \'{{ .Values._internal.default_storage_class }}\'
podAntiAffinityType: \'{{ .Values._internal.ha_cluster.is_ha | ternary "required" "preferred" }}\'
path: ./kustomize-units/keycloak-postgres
_components:
- \'{{ tuple "./components/ha" (.Values._internal.ha_cluster.is_ha) | include "set-only-if" }}\'
- \'{{ tuple "./components/keycloak-upgrade-db" (and .Values._internal.state.is_upgrade (tuple . "keycloak" | include "unit-enabled")) | include "set-only-if" }}\'
# Add a dedicated WAL PVC only if it was previously defined, in order to prevent this postgresql cluster from being reconfigured
# when sylva is upgraded from 1.4, just before this unit is replaced by the keycloak-postgresql one.
- >-
{{- if (lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" "" "clusters.postgresql.cnpg.io") }}
{{- tuple
"./components/wal-pvc"
(lookup "postgresql.cnpg.io/v1" "Cluster" "keycloak" "cnpg-keycloak" | dig "spec" "walStorage" "size" "" | eq "2Gi")
| include "set-only-if"
}}
{{- else }}
{{- tuple "" false | include "set-only-if" }}
{{- end }}
healthChecks:
- apiVersion: postgresql.cnpg.io/v1
kind: Cluster
name: cnpg-keycloak
namespace: keycloak
healthCheckExprs:
# CNPG does not expose a kstatus-compatible status (observedGeneration is missing), so we use status.phase instead
# It seems to report an accurate view, see https://github.com/cloudnative-pg/cloudnative-pg/blob/v1.27.0/api/v1/cluster_types.go#L642
- apiVersion: postgresql.cnpg.io/v1
kind: Cluster
current: status.phase == "Cluster in healthy state"
failed: status.phase != "Cluster in healthy state"
kubevirt:
info:
description: installs kubevirt
maturity: beta
unit_templates:
- base-deps
helm_repo_url: https://suse-edge.github.io/charts
helmrelease_spec:
chart:
spec:
chart: kubevirt
version: 0.6.0
targetNamespace: kubevirt
install:
createNamespace: true
values:
kubevirt:
configuration:
vmRolloutStrategy: "LiveUpdate"
developerConfiguration:
featureGates:
- NUMA
- CPUManager
- Snapshot
- ExpandDisks
- VMExport
_postRenderers:
- kustomize:
patches:
- \'{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include "preserve-type" }}\'
kubevirt-test-vms-remove:
info:
description: removes kubevirt test VMs before upgrade
internal: true
one_shot: true
unit_templates:
- base-deps
- kube-job
enabled_conditions:
- \'{{ tuple . "kubevirt-test-vms" | include "unit-enabled" }}\'
- \'{{ .Values._internal.state.is_upgrade }}\'
kustomization_spec:
targetNamespace: kubevirt-tests
_patches:
- \'{{ include "kube-job-replace-script-patch" (.Files.Get "scripts/kubevirt-delete-vm.sh") }}\'
kubevirt-test-vms:
info:
description: deploys kubevirt VMs for testing
internal: true
test: true
enabled_conditions:
- \'{{ gt (int .Values._internal.worker_node_count) 0 }}\'
unit_templates:
- base-deps
depends_on:
kubevirt: true
multus: true
kubevirt-test-vms-remove: \'{{ tuple . "kubevirt-test-vms-remove" | include "unit-enabled" }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kubevirt-test-vms
wait: true
targetNamespace: kubevirt-tests
postBuild:
substitute:
CPU_CORE: \'1\'
REQUESTED_MEMORY: \'128Mi\'
# renovate: datasource=docker
IMAGE: quay.io/kubevirt/cirros-container-disk-demo:v1.6.1
CLOUD_INIT_NO_CLOUD: SGkuXG4=
kubevirt-manager-deployment-remove:
info:
description: removes the kubevirt-manager deployment before upgrade
internal: true
one_shot: true
unit_templates:
- base-deps
- kube-job
enabled_conditions:
- \'{{ tuple . "kubevirt-manager" | include "unit-enabled" }}\'
- \'{{ .Values._internal.state.is_upgrade }}\'
kustomization_spec:
targetNamespace: kubevirt-manager
_patches:
- \'{{ include "kube-job-replace-script-patch" (.Files.Get "scripts/kubevirt-manager-deployment-delete.sh") }}\'
kubevirt-manager:
info:
description: deploys kubevirt-manager UI for kubevirt workloads
internal: true
enabled_conditions:
- \'{{ tuple . "kubevirt" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
ingress-nginx: \'{{ tuple . "ingress-nginx" | include "unit-enabled" }}\'
kubevirt-manager-deployment-remove: \'{{ tuple . "kubevirt-manager-deployment-remove" | include "unit-enabled" }}\'
sylva-ca: true
external-secrets-operator: true
repo: sylva-core
kustomization_substitute_secrets:
ADMIN_CREDENTIAL: \'{{ (htpasswd .Values._internal.kubevirt_admin_user .Values._internal.kubevirt_admin_password) | b64enc | quote }}\'
annotations:
sylvactl/readyMessage: "Kubevirt manager UI can be reached at https://{{ .Values.external_hostnames.kubevirt_manager }} ({{ .Values._internal.display_external_ip_msg }})"
kustomization_spec:
path: ./kustomize-units/kubevirt-manager
wait: true
targetNamespace: kubevirt-manager
postBuild:
substitute:
SERVICE: kubevirt-manager
SERVICE_DNS: \'{{ .Values.external_hostnames.kubevirt_manager }}\'
CERTIFICATE_NAMESPACE: kubevirt-manager
CERT: \'{{ .Values.external_certificates.kubevirt_manager.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
_components:
- \'{{ ternary "../tls-components/tls-secret" "../tls-components/tls-certificate" (hasKey .Values.external_certificates.kubevirt_manager "cert") }}\'
- "../tls-components/sylva-ca"
_patches:
- target:
kind: Deployment
patch: |
- op: replace
path: /spec/template/spec/containers/0/securityContext
value:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
runAsGroup: 30000
runAsUser: 10000
seccompProfile:
type: RuntimeDefault
kubevirt-cdi:
info:
description: manages Kubevirt CDI - Container Data Importer
maturity: beta
enabled_conditions:
- \'{{ tuple . "kubevirt" | include "unit-enabled" }}\'
unit_templates:
- base-deps
helm_repo_url: https://suse-edge.github.io/charts
helm_chart_artifact_name: kubevirt-cdi
helmrelease_spec:
chart:
spec:
chart: cdi
version: 0.4.0
targetNamespace: kubevirt-cdi
install:
createNamespace: true
values:
cdi:
config:
featureGates:
- HonorWaitForFirstConsumer
importProxy:
HTTPProxy: \'{{ get .Values.proxies "http_proxy" }}\'
HTTPSProxy: \'{{ get .Values.proxies "https_proxy" }}\'
noProxy: \'{{ include "sylva-units.no_proxy" (tuple .) }}\'
harbor-init:
info:
description: sets up Harbor prerequisites
details: it generates the namespace, certificate, admin password, and OIDC configuration
internal: true
depends_on:
namespace-defs: true
vault-init: true
sylva-ca: true
external-secrets-operator: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "harbor" | include "unit-enabled" }}\'
repo: sylva-core
kustomization_substitute_secrets:
KEY: \'{{ .Values.external_certificates.harbor.key | default "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/harbor-init
wait: true
postBuild:
substitute:
HARBOR_DNS: \'{{ .Values.external_hostnames.harbor }}\'
KEYCLOAK_DNS: \'{{ .Values.external_hostnames.keycloak }}\'
SERVICE: harbor
SERVICE_DNS: \'{{ .Values.external_hostnames.harbor }}\'
CERTIFICATE_NAMESPACE: harbor
CERT: \'{{ .Values.external_certificates.harbor.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
K8S_AUTH_PATH: \'{{ .Values.security.vault.paths.k8s }}\'
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
VAULT_API: \'{{ .Values.security.vault.external_vault_url | include "set-if-defined" }}\'
_components:
- \'{{ ternary "../tls-components/tls-secret" "../tls-components/tls-certificate" (hasKey .Values.external_certificates.harbor "cert") }}\'
- "../tls-components/sylva-ca"
harbor-postgres:
info:
description: installs Postgresql for Harbor
maturity: stable
depends_on:
namespace-defs: true
harbor-init: \'{{ tuple . "harbor-init" | include "unit-enabled" }}\' # conditional, because in workload clusters harbor-init isn\'t used
\'{{ .Values._internal.default_storage_class_unit }}\': true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "harbor" | include "unit-enabled" }}\'
repo: bitnami-postgresql
helmrelease_spec:
chart:
spec:
chart: bitnami/postgresql
targetNamespace: harbor
values:
image:
repository: bitnamilegacy/postgresql
metrics:
image:
repository: bitnamilegacy/postgres-exporter
volumePermissions:
image:
repository: bitnamilegacy/os-shell
auth:
username: harbor
database: harbor
existingSecret: harbor-postgres-secrets
secretKeys:
adminPasswordKey: admin-password
userPasswordKey: password
replicationPasswordKey: replication-password
architecture: replication
nameOverride: postgres
serviceAccount:
create: true
name: postgresql
primary:
pdb:
create: false # this is non-HA and single replica, so a PDB does not make sense
extendedConfiguration: |-
huge_pages = off
initdb:
args: "--set huge_pages=off"
readReplicas:
extendedConfiguration: |-
huge_pages = off
replicaCount: \'{{ .Values._internal.ha_cluster.is_ha | ternary 3 1 }}\'
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: postgres
app.kubernetes.io/component: read
topologyKey: kubernetes.io/hostname
_postRenderers:
- kustomize:
patches:
- \'{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include "preserve-type" }}\'
harbor:
info:
description: installs Harbor
maturity: beta
unit_templates:
- base-deps
depends_on:
namespace-defs: true
ingress-nginx: \'{{ tuple . "ingress-nginx" | include "unit-enabled" }}\'
harbor-init: \'{{ tuple . "harbor-init" | include "unit-enabled" }}\'
\'{{ .Values._internal.default_storage_class_unit }}\': true
harbor-postgres: true
monitoring-crd: \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
annotations:
sylvactl/readyMessage: "Harbor UI can be reached at https://{{ .Values.external_hostnames.harbor }} ({{ .Values._internal.display_external_ip_msg }})"
helm_repo_url: https://helm.goharbor.io
helmrelease_spec:
chart:
spec:
chart: harbor
version: 1.17.1
targetNamespace: harbor
values:
updateStrategy:
type: \'{{ eq .Values._internal.harbor_storage_access_mode "ReadWriteMany" | ternary "RollingUpdate" "Recreate" }}\'
persistence:
enabled: true
resourcePolicy: "keep"
persistentVolumeClaim:
registry:
storageClass: \'{{ .Values._internal.default_storage_class }}\'
#size: 32Gi
accessMode: \'{{ .Values._internal.harbor_storage_access_mode }}\'
jobservice:
jobLog:
storageClass: \'{{ .Values._internal.default_storage_class }}\'
#size: 8Gi
accessMode: \'{{ .Values._internal.harbor_storage_access_mode }}\'
redis:
storageClass: \'{{ .Values._internal.default_storage_class }}\'
#size: 8Gi
#trivy:
#storageClass: \'{{ .Values._internal.default_storage_class }}\'
#size: 8Gi
externalURL: \'https://{{ .Values.external_hostnames.harbor }}\'
existingSecretAdminPassword: \'{{ tuple "harbor-init" (tuple . "harbor-init" | include "unit-enabled") | include "set-only-if" }}\'
expose:
ingress:
enabled: true
className: nginx
hosts:
core: \'{{ .Values.external_hostnames.harbor }}\'
tls:
enabled: true
certSource: secret
secret:
secretName: harbor-tls
database:
type: external
external:
host: harbor-postgres-primary.harbor.svc.cluster.local
port: "5432"
username: "harbor"
coreDatabase: "harbor"
existingSecret: "harbor-postgres-secrets"
notary:
enabled: false
trivy:
enabled: false
proxy:
httpProxy: \'{{ get .Values.proxies "http_proxy" }}\'
httpsProxy: \'{{ get .Values.proxies "https_proxy" }}\'
noProxy: \'{{ include "sylva-units.no_proxy" (tuple .) }}\'
metrics:
enabled: true
serviceMonitor:
enabled: \'{{ .Values._internal.monitoring.enabled | include "preserve-type" }}\'
install:
createNamespace: true
vault-init:
info:
description: creates vault namespace
details: this unit creates the requirements to deploy vault
internal: true
repo: sylva-core
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "vault" | include "unit-enabled" }}\'
kustomization_spec:
path: ./kustomize-units/namespace-defs/baseline-namespace
targetNamespace: vault
wait: true
vault-operator:
info:
description: installs Vault operator
maturity: stable
depends_on:
vault-init: true
repo: vault-operator
unit_templates:
- base-deps
helmrelease_spec:
chart:
spec:
chart: deploy/charts/vault-operator
targetNamespace: vault
install:
createNamespace: true
values:
image:
repository: ghcr.io/bank-vaults/vault-operator
tag: \'{{ .Values.source_templates | dig "vault-operator" "spec" "ref" "tag" "" | required "source_templates.vault-operator.spec.ref.tag is unset" }}\'
openbao-set-service-label:
info:
description: Kyverno policy to add the vault-active pod label according to the presence of the openbao-active pod label
details: |
This policy sets the vault-active pod label on the openbao pods according to the presence of the openbao-active pod label, so
that the vault service created by the vault-operator can select the active pod instance.
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "kyverno" | include "unit-enabled" }}\'
- \'{{ .Values.security.secret_manager.variant | eq "openbao" }}\'
depends_on:
kyverno: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kyverno-policies/openbao-set-service-label
wait: true
openbao:
info:
description: installs Openbao
details: |
Openbao assumes that the certificate vault-tls has been issued
maturity: stable
# renovate: datasource=docker depname=ghcr.io/openbao/openbao
version: 2.4.1
unit_templates:
- base-deps
- vault-template
depends_on:
# in addition to the dependencies defined in vault-template
openbao-set-service-label: \'{{ tuple . "openbao-set-service-label" | include "unit-enabled" }}\'
enabled_conditions:
- \'{{ .Values.security.secret_manager.variant | eq "openbao" }}\'
kustomization_spec:
postBuild:
substitute:
VAULT_IMAGE: ghcr.io/openbao/openbao:{{ .Values.units | dig "openbao" "info" "version" "undefined" }}
_patches:
- patch: |
apiVersion: vault.banzaicloud.com/v1alpha1
kind: Vault
metadata:
name: vault
spec:
securityContext:
fsGroup: 1000
runAsGroup: 1000
runAsUser: 100
configPath: /openbao/config
- target:
kind: Vault
name: vault
patch: |
- op: add
path: /spec/vaultEnvsConfig/-
value:
name: BAO_K8S_POD_NAME
value: $(POD_NAME)
- op: add
path: /spec/vaultEnvsConfig/-
value:
name: BAO_CACERT
value: /vault/tls/ca.crt
- op: add
path: /spec/vaultEnvsConfig/-
value:
name: BAO_CLUSTER_ADDR
value: http://$(POD_NAME):8201
- op: add
path: /spec/vaultEnvsConfig/-
value:
name: SKIP_CHOWN
value: "true"
- op: add
path: /spec/vaultLabels/variant
value: openbao
- op: remove
path: /spec/securityContext/runAsNonRoot
hashicorp-vault:
info:
description: installs Hashicorp Vault
details: |
Vault assumes that the certificate vault-tls has been issued
maturity: stable
# renovate: datasource=docker depname=hashicorp/vault
version: 1.13.13
unit_templates:
- base-deps
- vault-template
enabled_conditions:
- \'{{ .Values.security.secret_manager.variant | eq "vault" }}\'
kustomization_spec:
postBuild:
substitute:
VAULT_IMAGE: hashicorp/vault:{{ .Values.units | dig "hashicorp-vault" "info" "version" "undefined" }}
vault:
info:
description: ensures that a secret store is installed
details: |
either hashicorp-vault or openbao is installed
maturity: stable
internal: true
unit_templates:
- dummy
depends_on:
hashicorp-vault: \'{{ .Values.security.secret_manager.variant | eq "vault" }}\'
openbao: \'{{ .Values.security.secret_manager.variant | eq "openbao" }}\'
vault-config-operator:
info:
description: installs Vault config operator
maturity: stable
unit_templates:
- base-deps
depends_on:
vault-init: true
cert-manager: true
monitoring: \'{{ .Values.units | dig "vault-config-operator" "helmrelease_spec" "values" "enableMonitoring" true }}\'
helm_repo_url: https://redhat-cop.github.io/vault-config-operator
helmrelease_spec:
chart:
spec:
chart: vault-config-operator
version: v0.8.33
targetNamespace: vault
values:
enableCertManager: true
enableMonitoring: false
vault-secrets:
info:
description: generates random secrets in vault, configures password policy, authentication backends, etc.
internal: true
repo: sylva-core
unit_templates:
- base-deps
depends_on:
vault: true
vault-config-operator: true
kustomization_spec:
path: ./kustomize-units/vault-secrets
wait: true
postBuild:
substitute:
K8S_AUTH_PATH: \'{{ .Values.security.vault.paths.k8s }}\'
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
VAULT_API: \'{{ .Values.security.vault.external_vault_url | include "set-if-defined" }}\'
_components:
- \'{{ tuple "components/keycloak" (tuple . "keycloak" | include "unit-enabled") | include "set-only-if" }}\'
vault-oidc:
info:
description: configures Vault to be used with OIDC
internal: true
enabled_conditions:
- \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
keycloak-resources: true
vault: true
vault-config-operator: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/vault-oidc
wait: true
postBuild:
substitute:
VAULT_DNS: \'{{ .Values.external_hostnames.vault }}\'
KEYCLOAK_DNS: \'{{ .Values.external_hostnames.keycloak }}\'
K8S_AUTH_PATH: \'{{ .Values.security.vault.paths.k8s }}\'
external-secrets-operator:
info:
description: installs the External Secrets operator
maturity: stable
unit_templates:
- base-deps
helm_repo_url: https://charts.external-secrets.io
helmrelease_spec:
chart:
spec:
chart: external-secrets
version: 0.20.1
targetNamespace: external-secrets
install:
createNamespace: true
values:
installCRDs: true
eso-secret-stores:
info:
description: defines External Secrets stores
internal: true
unit_templates:
- base-deps
depends_on:
external-secrets-operator: true
vault: true
keycloak-init: \'{{ tuple . "keycloak" | include "unit-enabled" }}\' # keycloak namespace required for keycloak component
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/eso-secret-stores
postBuild:
substitute:
K8S_AUTH_PATH: \'{{ .Values.security.vault.paths.k8s }}\'
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
VAULT_API: \'{{ .Values.security.vault.external_vault_url | include "set-if-defined" }}\'
wait: true
_components:
- \'{{ tuple "components/keycloak" (tuple . "keycloak" | include "unit-enabled") | include "set-only-if" }}\'
cis-operator-crd:
info:
description: installs CIS operator CRDs
maturity: stable
hidden: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "cis-operator" | include "unit-enabled" }}\'
helm_repo_url: https://charts.rancher.io
helmrelease_spec:
chart:
spec:
chart: rancher-cis-benchmark-crd
version: 106.0.0+up8.0.0
targetNamespace: cis-operator-system
install:
createNamespace: true
kustomization_spec:
prune: false
cis-operator:
info:
description: installs CIS operator
maturity: stable
unit_templates:
- base-deps
enabled_conditions:
- \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" }}\'
depends_on:
cis-operator-crd: true
helm_repo_url: https://charts.rancher.io
helmrelease_spec:
chart:
spec:
chart: rancher-cis-benchmark
version: 106.0.0+up8.0.0
targetNamespace: cis-operator-system
cis-operator-scan:
info:
description: allows running a CIS scan on the management cluster
details: |
it generates a report which can be viewed and downloaded in CSV from the Rancher UI, at https://rancher.sylva/dashboard/c/local/cis/cis.cattle.io.clusterscan
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "cis-operator" | include "unit-enabled" }}\'
depends_on:
cis-operator: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/cis-operator-scan
wait: false
postBuild:
substitute:
SCAN_PROFILE: \'{{ .Values.cis_benchmark_scan_profile }}\'
neuvector-init:
info:
description: sets up Neuvector prerequisites
details: |
it generates the certificate, admin password, and a policy exception for using latest-tag images (required for the pod managing the vulnerability database, since this DB is updated often)
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "neuvector" | include "unit-enabled" }}\'
depends_on:
namespace-defs: true
sylva-ca: true
external-secrets-operator: true
vault: true
vault-config-operator: true
kyverno: true
keycloak-add-client-scope: true
keycloak-oidc-external-secrets: true
kyverno-policies-ready: true
repo: sylva-core
kustomization_substitute_secrets:
KEY: \'{{ .Values.external_certificates.neuvector.key | default "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/neuvector-init
healthChecks:
- apiVersion: v1
kind: Secret
name: neuvector-init
namespace: neuvector
- apiVersion: v1
kind: Secret
name: neuvector-oidc-init
namespace: neuvector
postBuild:
substitute:
NEUVECTOR_DNS: \'{{ .Values.external_hostnames.neuvector }}\'
KEYCLOAK_DNS: \'{{ .Values.external_hostnames.keycloak }}\'
SERVICE: neuvector
SERVICE_DNS: \'{{ .Values.external_hostnames.neuvector }}\'
CERTIFICATE_NAMESPACE: neuvector
CERT: \'{{ .Values.external_certificates.neuvector.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
K8S_AUTH_PATH: \'{{ .Values.security.vault.paths.k8s }}\'
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
VAULT_API: \'{{ .Values.security.vault.external_vault_url | include "set-if-defined" }}\'
_components:
- \'{{ ternary "../tls-components/tls-secret" "../tls-components/tls-certificate" (hasKey .Values.external_certificates.neuvector "cert") }}\'
- "../tls-components/sylva-ca"
neuvector:
info:
description: installs Neuvector
maturity: beta
enabled: no
unit_templates:
- base-deps
depends_on:
neuvector-init: \'{{ tuple . "neuvector-init" | include "unit-enabled" }}\'
ingress-nginx: \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpk" }}\'
annotations:
sylvactl/readyMessage: "Neuvector UI can be reached at https://{{ .Values.external_hostnames.neuvector }} ({{ .Values._internal.display_external_ip_msg }})"
helm_repo_url: https://neuvector.github.io/neuvector-helm
helm_chart_artifact_name: neuvector-core
helmrelease_spec:
chart:
spec:
chart: core
version: 2.8.3
targetNamespace: neuvector
values:
leastPrivilege: true
internal:
certmanager:
enabled: true
secretname: neuvector-internal
autoGenerateCert: false
controller:
replicas: 1 # PVC only works for 1 replica https://github.com/neuvector/neuvector-helm/issues/110#issuecomment-1251921734
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
image:
repository: neuvector/controller
internal:
certificate:
secret: neuvector-internal
pvc:
enabled: true # setting PVC to true imposes 1 replica https://github.com/neuvector/neuvector-helm/issues/110#issuecomment-1251921734
accessModes:
- ReadWriteOnce
enforcer:
image:
repository: neuvector/enforcer
internal:
certificate:
secret: neuvector-internal
manager:
image:
repository: neuvector/manager
runAsUser: "10000"
ingress:
enabled: true
host: \'{{ .Values.external_hostnames.neuvector }}\'
ingressClassName: nginx
path: /
annotations:
nginx.ingress.kubernetes.io/backend-protocol: https
tls: true
secretName: neuvector-tls
cve:
updater:
podLabels:
tag-validating-policy.sylva.io: excluded
podAnnotations:
kube-score/ignore: container-image-tag
enabled: \'{{ .Values.security.neuvector_scanning_enabled | include "preserve-type" }}\'
image:
repository: neuvector/updater
scanner:
podLabels:
tag-validating-policy.sylva.io: excluded
podAnnotations:
kube-score/ignore: container-image-tag
enabled: \'{{ .Values.security.neuvector_scanning_enabled | include "preserve-type" }}\'
image:
repository: neuvector/scanner
env:
- name: https_proxy
value: \'{{ .Values.proxies.https_proxy }}\'
- name: no_proxy
value: \'{{ include "sylva-units.no_proxy" (tuple .) }}\'
internal:
certificate:
secret: neuvector-internal
containerd:
enabled: \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpk" | include "as-bool" }}\'
path: /var/run/containerd/containerd.sock
k3s:
enabled: \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" | include "as-bool" }}\'
runtimePath: /run/k3s/containerd/containerd.sock
resources:
limits:
cpu: 400m
memory: 2792Mi
requests:
cpu: 100m
memory: 2280Mi
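# the _postRenderers value below is rendered by a Go template: when security.neuvector_scanning_enabled is
# true, the kustomize patch list held in $patch is emitted (it adds a securityContext to the
# neuvector-updater-pod CronJob and labels the updater CronJob and scanner Deployment as excluded from the
# tag-validating policy); otherwise an empty list is produced. Both branches go through the "preserve-type"
# helper, presumably so that the result is kept as a list rather than a string.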
_postRenderers: |
{{- $patch := `
kustomize:
patches:
- target:
kind: CronJob
name: neuvector-updater-pod
patch: |-
- op: replace
path: /spec/startingDeadlineSeconds
value: 21600
- op: add
path: /metadata/labels/tag-validating-policy.sylva.io
value: excluded
- op: add
path: /spec/jobTemplate/spec/template/spec/containers/0/securityContext
value:
runAsNonRoot: true
runAsGroup: 10000
runAsUser: 10000
seccompProfile:
type: RuntimeDefault
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
- target:
kind: Deployment
name: neuvector-scanner-pod
patch: |-
- op: add
path: /metadata/labels/tag-validating-policy.sylva.io
value: excluded
` -}}
{{- if .Values.security.neuvector_scanning_enabled -}}
{{- list ($patch | fromYaml) | include "preserve-type" -}}
{{- else -}}
{{- list | include "preserve-type" -}}
{{- end -}}
kustomization_spec:
healthChecks:
- apiVersion: apps/v1
kind: Deployment
name: neuvector-manager-pod
namespace: neuvector
- apiVersion: apps/v1
kind: Deployment
name: neuvector-controller-pod
namespace: neuvector
crossplane-init:
info:
description: sets up Crossplane prerequisites
details: |
it generates the CA certificate secret to be used by the \'crossplane\' unit
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "crossplane" | include "unit-enabled" }}\'
depends_on:
namespace-defs: true
sylva-ca: true
external-secrets-operator: true
keycloak: \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/crossplane-init
postBuild:
substitute:
CERTIFICATE_NAMESPACE: crossplane-system
_components:
- "../tls-components/sylva-ca"
- \'{{ tuple "./components/providers/keycloak" (tuple . "keycloak" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/crossplane-monitoring" (tuple . "monitoring" | include "unit-enabled") | include "set-only-if" }}\'
healthChecks:
- apiVersion: v1
kind: Secret
name: sylva-ca.crt
namespace: crossplane-system
- \'{{ tuple (dict "apiVersion" "v1" "kind" "Secret" "namespace" "crossplane-system" "name" "keycloak-provider-secret") (tuple . "keycloak" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple (dict "apiVersion" "v1" "kind" "Secret" "namespace" "crossplane-system" "name" "keycloak-internal-tls") (tuple . "keycloak" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple (dict "apiVersion" "v1" "kind" "Secret" "namespace" "sylva-system" "name" "keycloak-bootstrap-admin") (tuple . "keycloak" | include "unit-enabled") | include "set-only-if" }}\'
crossplane-provider-keycloak:
info:
description: Deploys Crossplane Keycloak Provider
details: |
Deploys keycloak-provider and installs its CRDs
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "crossplane" | include "unit-enabled" }}\'
- \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
depends_on:
crossplane: true
keycloak: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/crossplane-provider-keycloak
healthChecks:
- apiVersion: pkg.crossplane.io/v1
kind: Provider
name: crossplane-contrib-provider-keycloak
crossplane:
info:
description: Installs Crossplane with RBAC Manager
maturity: stable
unit_templates:
- base-deps
depends_on:
namespace-defs: true
crossplane-init: true
monitoring-crd: \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
helm_repo_url: https://charts.crossplane.io/stable
helmrelease_spec:
releaseName: crossplane
targetNamespace: crossplane-system
chart:
spec:
chart: crossplane
version: 2.0.2
install:
createNamespace: false
values:
provider:
packages:
- xpkg.upbound.io/crossplane-contrib/provider-keycloak:v2.1.0
extraVolumesCrossplane:
- name: sylva-ca-crt
secret:
secretName: sylva-ca.crt
extraVolumeMountsCrossplane:
- name: sylva-ca-crt
mountPath: /etc/ssl/certs/ca.crt
subPath: ca.crt
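# the sylva-ca-crt volume/mount pair above injects the sylva-ca.crt Secret (a byproduct of the
# crossplane-init unit) into the Crossplane pod at /etc/ssl/certs/ca.crt, presumably so that the
# Crossplane pod trusts endpoints signed by the Sylva CA (e.g. the Keycloak ingress)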
extraEnvVarsCrossplane:
HTTP_PROXY: \'{{ .Values.proxies.http_proxy }}\'
HTTPS_PROXY: \'{{ .Values.proxies.https_proxy }}\'
NO_PROXY: \'{{ include "sylva-units.no_proxy" (tuple .) }}\'
rbacManager:
deploy: true
skipAggregatedClusterRoles: false
replicas: 1
leaderElection: true
securityContextCrossplane:
runAsUser: 65532
runAsNonRoot: true
runAsGroup: 65532
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
securityContextRBACManager:
runAsNonRoot: true
runAsUser: 65532
runAsGroup: 65532
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
metrics:
enabled: \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
keycloak-init:
info:
description: creates keycloak namespace
details: this unit creates the prerequisites for deploying keycloak
internal: true
repo: sylva-core
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
kustomization_spec:
path: ./kustomize-units/namespace-defs/standard-namespace
targetNamespace: keycloak
wait: true
keycloak:
info:
description: initializes and configures Keycloak
maturity: stable
unit_templates:
- base-deps
depends_on:
keycloak-init: true
sylva-ca: true
ingress-nginx: \'{{ tuple . "ingress-nginx" | include "unit-enabled" }}\'
keycloak-add-truststore: \'{{ tuple . "keycloak-add-truststore" | include "unit-enabled" }}\'
keycloak-postgres: \'{{ tuple . "keycloak-postgres" | include "unit-enabled" }}\'
keycloak-postgresql: \'{{ tuple . "keycloak-postgresql" | include "unit-enabled" }}\'
synchronize-secrets: true # make sure that the secret keycloak-bootstrap-admin is ready to be consumed
annotations:
sylvactl/readyMessage: "Keycloak admin console can be reached at https://{{ .Values.external_hostnames.keycloak }}/admin/master/console, user \'admin\', password in Vault at secret/keycloak ({{ .Values._internal.display_external_ip_msg }})"
repo: sylva-core
kustomization_substitute_secrets:
KEY: \'{{ .Values.external_certificates.keycloak.key | default "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/keycloak
targetNamespace: keycloak
postBuild:
substitute:
KEYCLOAK_REPLICAS: \'{{ .Values._internal.ha_cluster.is_ha | ternary 3 1 }}\'
AFFINITY: \'{{ .Values._internal.ha_cluster.is_ha | ternary (.Values._internal.keycloak_affinity | toJson) "{}" }}\'
KEYCLOAK_DNS: \'{{ .Values.external_hostnames.keycloak }}\'
SERVICE: keycloak
SERVICE_DNS: \'{{ .Values.external_hostnames.keycloak }}\'
CERT: \'{{ .Values.external_certificates.keycloak.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
substituteFrom:
- kind: ConfigMap
name: proxy-env-vars
healthChecks: # using only "wait: true" would not give us visibility on the status of the StatefulSet
# the Keycloak StatefulSet is produced by the combination of the Keycloak operator
# and a Keycloak custom resource, and it relies on the postgres DB also deployed by this unit
# hence, checking the health of this component can be done by checking this StatefulSet
- apiVersion: apps/v1
kind: StatefulSet
name: keycloak
namespace: keycloak
# the rest are objects created by the Kustomization
- apiVersion: k8s.keycloak.org/v2alpha1
kind: Keycloak
name: keycloak
namespace: keycloak
- apiVersion: cert-manager.io/v1
kind: Certificate
name: keycloak-internal-certificate
namespace: keycloak
- apiVersion: networking.k8s.io/v1
kind: Ingress
name: keycloak-ingress
namespace: keycloak
_components:
- \'{{ tuple "components/keycloak-operator-proxies" (.Values.proxies.https_proxy) | include "set-only-if" }}\'
- \'{{ ternary "../tls-components/tls-secret" "../tls-components/tls-certificate" (hasKey .Values.external_certificates.keycloak "cert") }}\'
- \'{{ tuple "components/keycloak-truststore" (hasKey .Values.external_certificates "cacert") | include "set-only-if" }}\'
_patches:
- patch: |
- op: replace
path: /spec/template/spec/containers/0/securityContext
value:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
runAsGroup: 1000
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
target:
kind: Deployment
name: keycloak-operator
keycloak-postgresql:
info:
description: Deploys a Postgres cluster for Keycloak using Cloud Native PostgreSQL (CNPG)
maturity: stable
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
repo: sylva-core
depends_on:
cnpg-operator: true
keycloak-init: true
keycloak-postgres: \'{{ tuple . "keycloak-postgres" | include "unit-enabled" }}\'
kustomization_spec:
path: ./kustomize-units/keycloak-postgresql
postBuild:
substitute:
replicas: \'{{ .Values._internal.ha_cluster.is_ha | ternary 3 1 }}\'
storageClass: \'{{ .Values._internal.default_storage_class }}\'
podAntiAffinityType: \'{{ .Values._internal.ha_cluster.is_ha | ternary "required" "preferred" }}\'
_components:
# Add component to import data from the db managed by the deprecated unit ...
- \'{{ tuple "./components/keycloak-import-db" (tuple . "keycloak-postgres" | include "unit-enabled") | include "set-only-if" }}\'
healthChecks:
- apiVersion: postgresql.cnpg.io/v1
kind: Cluster
name: keycloak-postgresql
namespace: keycloak
healthCheckExprs:
# CNPG does not expose a kstatus-compatible status (observedGeneration is missing), so we use status.phase instead
# It seems to report an accurate view, see https://github.com/cloudnative-pg/cloudnative-pg/blob/v1.27.0/api/v1/cluster_types.go#L642
- apiVersion: postgresql.cnpg.io/v1
kind: Cluster
current: status.phase == "Cluster in healthy state"
failed: status.phase != "Cluster in healthy state"
keycloak-legacy-operator:
info:
description: installs Keycloak "legacy" operator
maturity: stable
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
depends_on:
vault-secrets: true # the credential-external-keycloak Secret used by the legacy operator is generated from ES/Vault secret/data/keycloak
keycloak: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/keycloak-legacy-operator
targetNamespace: keycloak
postBuild:
substitute:
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
wait: true
_patches:
- patch: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: keycloak-realm-operator
spec:
template:
spec:
containers:
- name: keycloak-realm-operator
securityContext:
runAsUser: 10000
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
target:
kind: Deployment
name: keycloak-realm-operator
keycloak-resources:
info:
description: configures keycloak resources
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
depends_on:
keycloak: true
keycloak-legacy-operator: true
repo: sylva-core
kustomization_substitute_secrets:
SSO_PASSWORD: \'{{ .Values.admin_password }}\'
kustomization_spec:
path: ./kustomize-units/keycloak-resources
targetNamespace: keycloak
_components:
- \'{{ tuple "components/neuvector" (tuple . "neuvector" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/harbor" (tuple . "harbor" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/grafana" (tuple . "monitoring" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/kunai" (tuple . "kunai" | include "unit-enabled") | include "set-only-if" }}\'
postBuild:
substitute:
GRAFANA_DNS: \'{{ .Values.external_hostnames.grafana }}\'
RANCHER_DNS: \'{{ .Values.external_hostnames.rancher }}\'
FLUX_WEBUI_DNS: \'{{ .Values.external_hostnames.flux }}\'
HARBOR_DNS: \'{{ .Values.external_hostnames.harbor }}\'
NEUVECTOR_DNS: \'{{ tuple .Values.external_hostnames.neuvector (tuple . "neuvector" | include "unit-enabled") | include "set-only-if" }}\'
VAULT_DNS: \'{{ .Values.external_hostnames.vault }}\'
EXPIRE_PASSWORD_DAYS: \'{{ int .Values.keycloak.keycloak_expire_password_days }}\'
KUNAI_DNS: \'{{ tuple .Values.external_hostnames.kunai (tuple . "kunai" | include "unit-enabled") | include "set-only-if" }}\'
healthChecks: # cannot use __wait: true__ here, see https://gitlab.com/sylva-projects/sylva-core/-/issues/144
- apiVersion: legacy.k8s.keycloak.org/v1alpha1
kind: KeycloakRealm
name: sylva
namespace: keycloak
- apiVersion: v1
kind: Secret
name: keycloak-client-secret-rancher-client # this secret is a byproduct of the rancher-client KeycloakClient resource
namespace: keycloak
- apiVersion: v1
kind: Secret
name: keycloak-client-secret-flux-webui-client # this secret is a byproduct of the flux-webui-client KeycloakClient resource
namespace: keycloak
- \'{{ tuple (dict "apiVersion" "v1" "kind" "Secret" "name" "keycloak-client-secret-neuvector-client" "namespace" "keycloak") (tuple . "neuvector" | include "unit-enabled") | include "set-only-if" }}\' # this secret is a byproduct of the neuvector client KeycloakClient resource
- \'{{ tuple (dict "apiVersion" "v1" "kind" "Secret" "name" "keycloak-client-secret-harbor-client" "namespace" "keycloak") (tuple . "harbor" | include "unit-enabled") | include "set-only-if" }}\' # this secret is a byproduct of the harbor client KeycloakClient resource
- \'{{ tuple (dict "apiVersion" "v1" "kind" "Secret" "name" "keycloak-client-secret-grafana-client" "namespace" "keycloak") (tuple . "monitoring" | include "unit-enabled") | include "set-only-if" }}\' # this secret is a byproduct of the grafana client KeycloakClient resource
- \'{{ tuple (dict "apiVersion" "v1" "kind" "Secret" "name" "keycloak-client-secret-kunai-client" "namespace" "keycloak") (tuple . "kunai" | include "unit-enabled") | include "set-only-if" }}\' # this secret is a byproduct of the kunai client KeycloakClient resource
keycloak-add-client-scope:
info:
description: configures Keycloak client-scope
details: >
a job to manually add a custom client-scope to the sylva realm (on top of the default ones),
while the CRD option does not yet provide good results (it overrides the defaults)
internal: true
unit_templates:
- base-deps
- kube-job
enabled_conditions:
- \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
depends_on:
keycloak-resources: true
keycloak: true # defines the keycloak-bootstrap-admin Secret used by the script
kustomization_spec:
targetNamespace: keycloak
postBuild:
substitute:
RUNASUSER: \'10000\'
RUNASGROUP: \'10000\'
_patches:
- \'{{ include "kube-job-replace-script-patch" (.Files.Get "scripts/keycloak-add-client-scope.sh") }}\'
# Workaround for realm role creation
keycloak-add-realm-role:
info:
description: Creates Keycloak realm role
details: >
a job to manually create a custom realm role in the sylva realm (on top of the default ones) and assign it to sylva-admin,
while the CRD option does not allow updates.
internal: true
unit_templates:
- base-deps
- kube-job
enabled_conditions:
- \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
depends_on:
keycloak-resources: true
keycloak: true # defines the keycloak-bootstrap-admin Secret used by the script
kustomization_spec:
postBuild:
substitute:
NAMESPACE: keycloak
RUNASUSER: \'10000\'
RUNASGROUP: \'10000\'
_patches:
- \'{{ include "kube-job-replace-script-patch" (.Files.Get "scripts/keycloak-add-realm-role.sh") }}\'
keycloak-add-truststore:
info:
description: configures Keycloak truststore
details: >
a job to manually add a truststore to the Keycloak instance, e.g. to enable the LDAPS protocol when using user federation
internal: true
unit_templates:
- base-deps
- kube-job
enabled_conditions:
- \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
- \'{{ hasKey .Values.external_certificates "cacert" }}\'
depends_on:
vault: true
keycloak-init: true
sylva-ca: true
external-secrets-operator: true
kustomization_spec:
postBuild:
substitute:
NAMESPACE: keycloak
CERTIFICATE_NAMESPACE: keycloak
_components:
- "../tls-components/sylva-ca"
_patches:
- \'{{ include "kube-job-replace-image-patch" .Values._internal.keytool_image }}\'
- \'{{ include "kube-job-replace-script-patch" (.Files.Get "scripts/keycloak-add-truststore.sh") }}\'
keycloak-oidc-external-secrets:
info:
description: configures OIDC secrets for Keycloak
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
depends_on:
keycloak: true
keycloak-legacy-operator: true
keycloak-resources: true
eso-secret-stores: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/keycloak-oidc-external-secrets
postBuild:
substitute:
KEYCLOAK_DNS: \'{{ .Values.external_hostnames.keycloak }}\'
FLUX_WEBUI_DNS: \'{{ .Values.external_hostnames.flux }}\'
wait: false
healthChecks:
- >-
{{ tuple (dict
"apiVersion" "v1"
"kind" "Secret"
"name" "grafana-oidc"
"namespace" "cattle-monitoring-system"
)
(tuple . "monitoring" | include "unit-enabled")
| include "set-only-if"
}}
# this secret is a byproduct of the eso-grafana-oidc ExternalSecret resource
- >-
{{ tuple (dict
"apiVersion" "v1"
"kind" "Secret"
"name" "oidc-auth"
"namespace" "flux-system"
)
(tuple . "flux-webui" | include "unit-enabled")
| include "set-only-if"
}}
# this secret is a byproduct of the eso-flux-webui-oidc ExternalSecret resource
- >-
{{ tuple (dict
"apiVersion" "v1"
"kind" "Secret"
"name" "keycloakoidcconfig-clientsecret"
"namespace" "cattle-global-data"
)
(tuple . "rancher" | include "unit-enabled")
| include "set-only-if" }}
# this secret is a byproduct of the eso-rancher-oidc ExternalSecret resource
_components:
- \'{{ tuple "components/grafana-oidc" (tuple . "monitoring" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/flux-webui-oidc" (tuple . "flux-webui" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/rancher-oidc" (tuple . "rancher" | include "unit-enabled") | include "set-only-if" }}\'
sbom-operator:
info:
description: installs SBOM operator
maturity: beta
enabled: no
unit_templates:
- base-deps
depends_on:
namespace-defs: true
helm_repo_url: https://ckotzbauer.github.io/helm-charts
helmrelease_spec:
chart:
spec:
chart: sbom-operator
version: 0.36.0
targetNamespace: sbom-operator
values:
args:
targets: \'{{ .Values.security.sbom_operator.targets | join "," }}\'
dtrack-base-url: \'{{ tuple .Values.security.sbom_operator.dtrack_base_url (hasKey .Values.security.sbom_operator "dtrack_base_url") | include "set-only-if" }}\'
oci-registry: \'{{ tuple .Values.security.sbom_operator.oci_registry (hasKey .Values.security.sbom_operator "oci_registry") | include "set-only-if" }}\'
format: cyclonedx
cron: "*/30 * * * *" # the cron value allows the operator to periodically checks for changed images in the cluster
envVars:
- name: https_proxy
value: \'{{ .Values.proxies.https_proxy }}\'
- name: http_proxy
value: \'{{ .Values.proxies.https_proxy }}\'
- name: no_proxy
value: \'{{ include "sylva-units.no_proxy" (tuple .) }}\'
helm_secret_values:
args:
dtrack-api-key: \'{{ tuple .Values.security.sbom_operator.dtrack_api_key (hasKey .Values.security.sbom_operator "dtrack_api_key") | include "set-only-if" }}\'
oci-user: \'{{ tuple .Values.security.sbom_operator.oci_user (hasKey .Values.security.sbom_operator "oci_user") | include "set-only-if" }}\'
oci-token: \'{{ tuple .Values.security.sbom_operator.oci_token (hasKey .Values.security.sbom_operator "oci_token") | include "set-only-if" }}\'
kyverno:
info:
description: installs Kyverno
maturity: core-component
unit_templates: [] # we intentionally don\'t inherit from base-deps, because kyverno is itself part of base dependencies
depends_on:
monitoring-crd: \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
helm_repo_url: https://kyverno.github.io/kyverno
helmrelease_spec:
chart:
spec:
chart: kyverno
version: 3.4.4
targetNamespace: kyverno
timeout: 10m
install:
createNamespace: true
values:
config:
resourceFiltersExclude:
- "[Node,*,*]"
- "[Node/*,*,*]"
# ##### how we exclude kube-system from Kyverno webhooks (but not completely) ###########################
#
# It is a generalized safeguard to not let Kyverno define webhooks on the kube-system namespace.
# This is the default Kyverno chart configuration (namespaceSelector under config.webhook).
#
# In Sylva we have policies for which we *need* some validation webhooks in kube-system:
# * rke2-helmchart-prevent-uninstall
# * block-rke2-uninstall-jobs
#
# This comment groups the different things done to allow excluding the kube-system namespace,
# except for Jobs.
#
# Instead of using a \'namespaceSelector\', we use global \'matchConditions\'.
#
# /!\\ There is however a side effect: policies that use spec.webhookConfiguration.matchConditions
# will **not** exclude kube-system (this field overrides the global \'matchConditions\').
# So any policy using spec.webhookConfiguration.matchConditions needs to include a CEL expression
# to exclude the kube-system namespace.
#
- "[*/*,kube-system,*]" # cancels this entry which is present in chart default resourceFilters
#
webhooks:
# cancels the namespaceSelector present in default values that excludes the kube-system namespace.
# This has the side effect of ignoring excludeKyvernoNamespace so we have to explicitly set it here.
namespaceSelector:
matchExpressions:
- key: kubernetes.io/metadata.name
operator: NotIn
values:
- kyverno
matchConditions: # ... and re-adds it via matchConditions to exclude kube-system:
# but, as said above, this applies only to policies that do not set webhookConfiguration
# (the CEL expression below tests namespace exclusion, except for non-namespaced resources)
- expression: \'has(request.namespace) ? (request.namespace != "kube-system") : true\'
name: ns-not-kube-system-global
# #########################################################################################################
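# Illustration only (hypothetical policy name): a policy that sets its own
# spec.webhookConfiguration.matchConditions must carry the kube-system exclusion itself, since its
# matchConditions override the global ones defined above, e.g.:
#
#   apiVersion: kyverno.io/v1
#   kind: ClusterPolicy
#   metadata:
#     name: some-policy
#   spec:
#     webhookConfiguration:
#       matchConditions:
#         - name: ns-not-kube-system
#           expression: \'has(request.namespace) ? (request.namespace != "kube-system") : true\'
#         # ... plus whatever other conditions the policy needs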
webhooksCleanup:
enabled: false
image:
repository: alpine/kubectl
tag: 1.34.1
features:
policyExceptions:
enabled: true
namespace: "*"
admissionController:
crdWatcher: true
replicas: \'{{ .Values._internal.default_replicas | include "preserve-type" }}\'
rbac:
clusterRole:
extraResources:
- apiGroups: ["metal3.io"]
resources: ["baremetalhosts"]
verbs: ["get", "list", "watch"]
- apiGroups: ["minio.min.io"]
resources: ["tenants"]
verbs: ["get", "list", "watch"]
- apiGroups: ["*"]
resources: [ "secrets" ]
verbs: ["get", "list", "watch"]
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalnetworksets
verbs: ["create", "update", "patch", "delete", "get", "list", "watch"]
- apiGroups: ["ipam.metal3.io"]
resources:
- ippools
verbs: ["get", "list", "watch"]
- apiGroups: ["infrastructure.cluster.x-k8s.io"]
resources:
- openstackmachines
- metal3machines
verbs: ["get", "list", "watch"]
- apiGroups: ["helm.toolkit.fluxcd.io"]
resources:
- helmreleases
verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: [""]
resources:
- configmaps
verbs: ["get", "list", "patch", "update", "watch", "create", "delete"]
- apiGroups: ["apps"]
resources:
- daemonsets
verbs: ["get", "list", "patch", "update", "watch", "create"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "list", "watch"]
container:
resources:
limits:
memory: 1024Mi
serviceMonitor:
enabled: \'{{ .Values._internal.monitoring.enabled | include "preserve-type" }}\'
cleanupController:
replicas: \'{{ .Values._internal.default_replicas | include "preserve-type" }}\'
rbac:
clusterRole:
extraResources:
- apiGroups: [""]
resources:
- nodes
- secrets
- services
verbs: ["get", "list", "watch", "patch", "update", "delete"]
- apiGroups: ["longhorn.io"]
resources:
- engineimages
verbs: ["get", "list", "watch", "patch", "update", "delete"]
serviceMonitor:
enabled: \'{{ .Values._internal.monitoring.enabled | include "preserve-type" }}\'
reportsController:
replicas: \'{{ .Values._internal.default_replicas | include "preserve-type" }}\'
serviceMonitor:
enabled: \'{{ .Values._internal.monitoring.enabled | include "preserve-type" }}\'
resources:
limits:
memory: 256Mi
requests:
memory: 128Mi
rbac:
clusterRole:
extraResources:
- apiGroups: ["*"]
resources: ["*"]
verbs: ["get", "list", "watch"]
backgroundController:
replicas: \'{{ .Values._internal.default_replicas | include "preserve-type" }}\'
resources:
limits:
memory: 1024Mi
requests:
memory: 256Mi
resyncPeriod: 5m
rbac:
clusterRole:
extraResources:
- apiGroups: [""]
resources:
- "secrets"
verbs: ["get", "list", "patch", "update", "watch", "create", "delete"]
- apiGroups: ["apps"]
resources:
- daemonsets
verbs: ["get", "list", "patch", "update", "watch", "create"]
- apiGroups: ["management.cattle.io"] # for set-monitoring-chart-cluster-id ClusterPolicy
resources:
- "clusters"
verbs: ["get", "list", "watch"]
- apiGroups: ["provisioning.cattle.io"] # for set-monitoring-chart-cluster-id ClusterPolicy
resources:
- "clusters"
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources:
- "cronjobs"
- "jobs"
verbs: ["get", "list", "patch", "update", "watch"]
- apiGroups: ["minio.min.io"]
resources: ["tenants"]
verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "list", "watch"]
- apiGroups: ["vault.banzaicloud.com"]
resources:
- "vaults"
verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: ["apps"]
resources:
- "statefulsets"
- "deployments"
verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: ["helm.toolkit.fluxcd.io"]
resources:
- "helmreleases"
verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: ["external-secrets.io"]
resources:
- "externalsecrets"
verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: ["unitsoperator.sylva"]
resources:
- "sylvaunitsreleasetemplates"
verbs: ["get", "list", "watch", "patch", "update", "create", "delete"]
- apiGroups: ["cluster.x-k8s.io"]
resources:
- machines
verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: [""]
resources:
- nodes
verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: [""]
resources:
- serviceaccounts
verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: ["metallb.io"]
resources:
- ipaddresspools
- l2advertisements
- bgppeers
- bgpadvertisements
verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalnetworksets
verbs: ["create", "update", "patch", "delete", "get", "list", "watch"]
- apiGroups: ["infrastructure.cluster.x-k8s.io"]
resources:
- openstackmachines
- metal3machines
verbs: ["get", "list", "watch"]
- apiGroups: ["ipam.metal3.io"]
resources:
- ippools
verbs: ["get", "list", "watch"]
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
verbs: ["get", "list", "watch"]
- apiGroups: ["metal3.io"]
resources:
- baremetalhosts
verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: ["longhorn.io"]
resources:
- "volumes"
verbs: ["get", "list", "watch", "patch", "update"]
# for rke2-helmchart-prevent-uninstall policy:
- >-
{{
tuple (dict "apiGroups" (list "helm.cattle.io")
"resources" (list "helmcharts")
"verbs" (list "get" "list" "watch" "patch" "update")
)
(eq .Values.cluster.capi_providers.bootstrap_provider "cabpr")
| include "set-only-if"
}}
serviceMonitor:
enabled: \'{{ .Values._internal.monitoring.enabled | include "preserve-type" }}\'
policyReportsCleanup:
image:
repository: alpine/kubectl
tag: 1.34.1
_postRenderers:
- kustomize:
patches:
- \'{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include "preserve-type" }}\'
kyverno-policies:
info:
description: configures Kyverno policies
internal: true
unit_templates: [] # we intentionally don\'t inherit from base-deps, because kyverno-policies is itself part of base dependencies
enabled_conditions:
- \'{{ tuple . "kyverno" | include "unit-enabled" }}\'
depends_on:
kyverno: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kyverno-policies/generic
wait: true
postBuild:
substitute:
CLUSTER_NAME: \'{{ .Values.cluster.name }}\'
SIGN_MGMT: \'{{ .Values.security.upstream_images_signature.policy_action }}\'
SIG_REPO: \'{{ .Values.security.upstream_images_signature.repository }}\'
DIGEST: \'{{ .Values.security.upstream_images_signature.policy_action | eq "Enforce" }}\'
COSIGN_PUBLIC_KEY: \'{{ .Values.security.upstream_images_signature.cosign_public_key | quote }}\'
IMAGE_LIST: \'{{ .Values.security.upstream_images_signature.images_list | toJson }}\'
COREDNS_DEPLOYMENT_NAME: \'{{ tuple (.Values.cluster.capi_providers.bootstrap_provider | eq "cabpk" | ternary "coredns" "rke2-coredns-rke2-coredns") (tuple . "cabpoa" | include "unit-enabled") | include "set-only-if" }}\'
COREDNS_CONFIGMAP_NAME: \'{{ tuple (.Values.cluster.capi_providers.bootstrap_provider | eq "cabpk" | ternary "coredns" "rke2-coredns-rke2-coredns") (tuple . "cabpoa" | include "unit-enabled") | include "set-only-if" }}\'
_components:
- \'{{ tuple "components/bootstrap-cluster-only" .Values._internal.bstrp_cluster | include "set-only-if" }}\'
- \'{{ tuple "components/management-cluster-only" .Values._internal.mgmt_cluster | include "set-only-if" }}\'
- \'{{ tuple "components/descheduler" (tuple . "descheduler" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/loki-aggregated-secrets" (tuple . "loki" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/loki-aggregated-secrets-cleanup" (and (tuple . "loki" | include "unit-enabled") (not .Values._internal.state.is_upgrade)) | include "set-only-if" }}\'
- \'{{ tuple "components/thanos" (tuple . "thanos" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/check-images" .Values.security.upstream_images_signature.verify | include "set-only-if" }}\'
- \'{{ tuple "components/mutate-job-security-context" (tuple . "rancher" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/neuvector-policies" (tuple . "neuvector" | include "unit-enabled") | include "set-only-if" }}\'
- ../exclude-kube-system # needs to be last (needed for components above that use sylva.org/kyverno-exclude-kube-system annotation)
- \'{{ tuple "components/coredns-custom-hosts-import" (tuple . "cabpoa" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/grafana-oidc" (tuple . "monitoring" | include "unit-enabled") | include "set-only-if" }}\'
kyverno-policies-ready:
info:
description: additional delay to ensure that kyverno webhooks are properly installed in api-server
internal: true
unit_templates:
- base-deps
- kube-job
enabled_conditions:
- \'{{ tuple . "kyverno-policies" | include "unit-enabled" }}\'
kustomization_spec:
_patches:
- \'{{ include "kube-job-replace-script-patch-inline" "sleep 60" }}\'
kyverno-vault-restart-policy:
info:
description: restarts vault after certificate renewal
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "kyverno" | include "unit-enabled" }}\'
depends_on:
kyverno: true
vault: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kyverno-policies/kyverno-vault-restart-policy
wait: true
kyverno-metal3-policies:
info:
description: kyverno policies specific to capm3-system
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "kyverno" | include "unit-enabled" }}\'
- \'{{ tuple . "metal3" | include "unit-enabled" }}\'
depends_on:
kyverno: true
metal3: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kyverno-policies/metal3-policies
wait: true
kyverno-update-namespace-and-psa:
info:
description: grants Kyverno the permission to update namespaces using the "updatepsa" verb (Rancher-specific)
internal: true
details: >
This unit allows Kyverno to define namespaces with specific PodSecurityAdmission levels.
It is useful for situations where namespaces need to be mutated (with PSA labels)
in order to accommodate privileged pods (for which the restricted PSA level set at cluster level is not enough),
when namespace creation is not controlled.
unit_templates:
- base-deps
enabled_conditions:
- \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" }}\'
- \'{{ tuple . "kyverno" | include "unit-enabled" }}\'
- \'{{ tuple . "rancher" | include "unit-enabled" }}\'
depends_on:
kyverno: true
rancher: true # the updatepsa verb on projects resources in the management.cattle.io apiGroup is Rancher-specific
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kyverno-psa
wait: true
rancher-monitoring-clusterid-inject:
info:
description: injects Rancher cluster ID in Helm values of Rancher monitoring chart
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "kyverno" | include "unit-enabled" }}\'
- \'{{ tuple . "rancher" | include "unit-enabled" }}\'
depends_on:
kyverno: \'{{ tuple . "kyverno" | include "unit-enabled" }}\'
rancher: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/rancher-monitoring-clusterid-inject
wait: true
mgmt-cluster-state-values:
info:
description: manages workload cluster parameters which reflect management cluster state
internal: true
unit_templates:
- base-deps
depends_on:
kyverno: true
sylva-units-operator: \'{{ tuple . "sylva-units-operator" | include "unit-enabled" }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/mgmt-cluster-state-values
wait: true
force: true
targetNamespace: sylva-system
_patches:
- target:
kind: ConfigMap
patch: |
- op: replace
path: /data/values
value: |
{{- tuple . "mgmt_cluster_state_values" | include "interpret" }}
{{ .Values.mgmt_cluster_state_values | toYaml | indent 4 }}
_components:
- \'{{ tuple "components/capm3" (tuple . "capm3" | include "unit-enabled") | include "set-only-if" }}\'
capi:
info:
description: installs Cluster API core operator
maturity: core-component
repo: sylva-core
unit_templates:
- base-deps
depends_on:
cert-manager: true
kustomization_spec:
path: ./kustomize-units/capi
postBuild:
substitute:
force_var_substitution_enabled: "true" # dummy value to ensure substitution of defaults
wait: true
_components:
- \'{{ tuple "components/ha" (.Values._internal.ha_cluster.is_ha) | include "set-only-if" }}\'
capd:
info:
description: installs Docker CAPI infra provider
maturity: core-component
repo: sylva-core
unit_templates:
- base-deps
depends_on:
cert-manager: true
kustomization_spec:
path: ./kustomize-units/capd
postBuild:
substitute:
CAPD_DOCKER_HOST: \'{{ .Values.capd_docker_host }}\'
substituteFrom:
- kind: ConfigMap
name: proxy-env-vars
optional: true
wait: true
_components:
- \'{{ tuple "components/ha" (.Values._internal.ha_cluster.is_ha) | include "set-only-if" }}\'
capo-orc:
info:
description: installs OpenStack Resource Controller (orc)
details: |
The [OpenStack Resource Controller](https://k-orc.cloud/) (a.k.a. ORC) is a component used by CAPO controller
maturity: core-component
repo: sylva-core
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "capo" | include "unit-enabled" }}\'
depends_on:
cert-manager: true
kustomization_spec:
path: ./kustomize-units/capo-orc
wait: true
capo:
info:
description: installs OpenStack CAPI infra provider
maturity: core-component
repo: sylva-core
unit_templates:
- base-deps
depends_on:
cert-manager: true
capo-orc: true
kustomization_spec:
path: ./kustomize-units/capo
wait: true
_components:
- \'{{ tuple "components/ha" (.Values._internal.ha_cluster.is_ha) | include "set-only-if" }}\'
_patches:
- target:
group: apps
version: v1
kind: Deployment
name: capo-controller-manager
namespace: capo-system
patch: |-
- op: add
path: /spec/template/spec/containers/0/securityContext
value:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
metal3-ipam:
info:
description: installs IP Address Manager for Metal3 CAPI provider
maturity: core-component
repo: sylva-core
unit_templates:
- base-deps
depends_on:
cert-manager: true
capi: true
kustomization_spec:
path: ./kustomize-units/metal3-ipam
postBuild:
substitute:
force_var_substitution_enabled: "true" # dummy value to ensure substitution of defaults
wait: true
_components:
- \'{{ tuple "components/ha" (.Values._internal.ha_cluster.is_ha) | include "set-only-if" }}\'
capm3:
info:
description: installs Metal3 CAPI infra provider, for baremetal
maturity: core-component
repo: sylva-core
unit_templates:
- base-deps
depends_on:
cert-manager: true
metal3-ipam: true
metal3: true
kustomization_spec:
path: ./kustomize-units/capm3
postBuild:
substitute:
force_var_substitution_enabled: "true" # dummy value to ensure substitution of defaults
ENABLE_BMH_NAME_BASED_PREALLOCATION: "true"
wait: true
_components:
- \'{{ tuple "components/ha" (.Values._internal.ha_cluster.is_ha) | include "set-only-if" }}\'
capv:
info:
description: installs vSphere CAPI infra provider
maturity: core-component
repo: sylva-core
unit_templates:
- base-deps
depends_on:
cert-manager: true
kustomization_spec:
path: ./kustomize-units/capv
postBuild:
substitute:
force_var_substitution_enabled: "true" # dummy value to ensure substitution of defaults
wait: true
_components:
- \'{{ tuple "components/ha" (.Values._internal.ha_cluster.is_ha) | include "set-only-if" }}\'
capone:
info:
description: installs OpenNebula CAPONE infra provider
maturity: core-component
repo: sylva-core
unit_templates:
- base-deps
depends_on:
cert-manager: true
kustomization_spec:
path: ./kustomize-units/capone
postBuild:
substitute:
force_var_substitution_enabled: "true" # dummy value to ensure substitution of defaults
wait: true
_components:
- \'{{ tuple "components/ha" (.Values._internal.ha_cluster.is_ha) | include "set-only-if" }}\'
cabpk:
info:
description: installs Kubeadm CAPI bootstrap provider
maturity: core-component
repo: sylva-core
unit_templates:
- base-deps
depends_on:
cert-manager: true
kustomization_spec:
path: ./kustomize-units/cabpk
postBuild:
substitute:
force_var_substitution_enabled: "true" # dummy value to ensure substitution of defaults
wait: true
_components:
- \'{{ tuple "components/ha" (.Values._internal.ha_cluster.is_ha) | include "set-only-if" }}\'
cabpck:
info:
description: installs Canonical CAPI bootstrap provider
maturity: core-component
repo: sylva-core
unit_templates:
- base-deps
depends_on:
cert-manager: true
kustomization_spec:
path: ./kustomize-units/cabpck
postBuild:
substitute:
force_var_substitution_enabled: "true" # dummy value to ensure substitution of defaults
wait: true
cabpr:
info:
description: installs RKE2 CAPI bootstrap provider
maturity: core-component
repo: sylva-core
unit_templates:
- base-deps
depends_on:
cert-manager: true
kustomization_spec:
path: ./kustomize-units/cabpr
wait: true
_components:
- \'{{ tuple "components/ha" (.Values._internal.ha_cluster.is_ha) | include "set-only-if" }}\'
postBuild:
substitute:
var_substitution_enabled: "true" # To force substitution of default controller diagnostics flags
_patches:
- target:
group: apps
version: v1
kind: Deployment
name: rke2-bootstrap-controller-manager
namespace: rke2-bootstrap-system
patch: |-
- op: add
path: /spec/template/spec/containers/0/securityContext
value:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
- target:
group: apps
version: v1
kind: Deployment
name: rke2-control-plane-controller-manager
namespace: rke2-control-plane-system
patch: |-
- op: add
path: /spec/template/spec/containers/0/securityContext
value:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
cabpoa:
info:
description: installs OKD/OpenShift CAPI bootstrap/controlplane provider
maturity: experimental
enabled: false
repo: sylva-core
unit_templates:
- base-deps
depends_on:
capm3: true
openshift-assisted-installer: true
kustomization_spec:
path: ./kustomize-units/cabpoa
wait: true
openshift-assisted-installer:
info:
description: installs assisted installer operator for OKD
maturity: experimental
repo: sylva-core
unit_templates:
- base-deps
depends_on:
cert-manager: true
metal3: true
kyverno: true # test workaround for https://gitlab.com/sylva-projects/sylva-core/-/merge_requests/2654#note_2114765991
\'{{ .Values._internal.default_storage_class_unit }}\': true
enabled_conditions:
- \'{{ tuple . "cabpoa" | include "unit-enabled" }}\'
annotations:
sylvactl/readyMessage: "{{ .Values.openshift.assisted.serviceHostname | default .Values.external_hostnames.openshift_assisted_service }} and {{ .Values.openshift.assisted.imageHostname | default .Values.external_hostnames.openshift_assisted_images }} must resolve to {{ .Values.display_external_ip }}"
kustomization_spec:
path: ./kustomize-units/openshift-assisted-installer
postBuild:
substitute:
ASSISTED_SERVICE_HOSTNAME: \'{{ .Values.openshift.assisted.serviceHostname | default .Values.external_hostnames.openshift_assisted_service }}\'
ASSISTED_IMAGE_HOSTNAME: \'{{ .Values.openshift.assisted.imageHostname | default .Values.external_hostnames.openshift_assisted_images }}\'
ASSISTED_DB_SIZE: \'{{ .Values.openshift.assisted.dbSize }}\'
ASSISTED_FS_SIZE: \'{{ .Values.openshift.assisted.fsSize }}\'
ASSISTED_IMAGESTORE_SIZE: \'{{ .Values.openshift.assisted.imagestoreSize }}\'
ASSISTED_OS_IMAGES: \'{{ .Values.openshift.assisted.osImages | toJson }}\'
HTTP_PROXY: \'{{ .Values.proxies.http_proxy }}\'
HTTPS_PROXY: \'{{ .Values.proxies.https_proxy }}\'
NO_PROXY: \'{{ include "sylva-units.no_proxy" (tuple .) }}\'
healthChecks:
- apiVersion: apps/v1
kind: StatefulSet
name: assisted-image-service
namespace: assisted-installer
- apiVersion: apps/v1
kind: Deployment
name: assisted-service
namespace: assisted-installer
openshift-security-context-constraints:
info:
description: sets up OpenShift security context constraints for operators not installed via the Red Hat Operator Lifecycle Manager (OLM)
internal: true
unit_templates:
- base-deps
enabled: \'{{ eq .Values.cluster.capi_providers.bootstrap_provider "cabpoa" }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/openshift-security-context-constraints
wait: true
_components:
- \'{{ tuple "components/ceph-csi-cephfs" (tuple . "ceph-csi-cephfs" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/goldpinger" (tuple . "goldpinger" | include "unit-enabled") | include "set-only-if" }}\'
metal3-sylva-ca-init:
info:
description: injects sylva-ca certificate in metal3
details: this certificate is needed to download baremetal os images via https
internal: true
repo: sylva-core
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "metal3" | include "unit-enabled" }}\'
- \'{{ tuple . "os-image-server" | include "unit-enabled" }}\'
depends_on:
namespace-defs: true
ingress-nginx-init: true
external-secrets-operator: true
kustomization_spec:
path: ./kustomize-units/sylva-ca-external-secret
wait: true
targetNamespace: metal3-system
postBuild:
substitute:
CERTIFICATE_NAMESPACE: metal3-system
CACERT_SECRET_NAME: tls-ca-additional # this Secret is used by metal3 chart when additionalTrustedCAs is true
CERT_FILE_NAME: sylva-ca.crt
metal3:
info:
description: installs SUSE-maintained Metal3 operator
maturity: stable
enabled: false
unit_templates:
- base-deps
annotations:
sylvactl/unitTimeout: \'{{ printf "%dm" ( include "cluster-unit-timeout" . | int) }}\'
depends_on:
metal3-sylva-ca-init: \'{{ tuple . "metal3-sylva-ca-init" | include "unit-enabled" }}\'
namespace-defs: true
cert-manager: true
\'{{ .Values._internal.default_storage_class_unit }}\': \'{{ not .Values._internal.state.is_upgrade }}\'
metal3-pdb: \'{{ tuple . "metal3-pdb" | include "unit-enabled" }}\'
metallb-resources: \'{{ tuple . "metallb" | include "unit-enabled" }}\'
helm_repo_url: https://suse-edge.github.io/charts
helm_chart_artifact_name: metal3
helmrelease_spec:
chart:
spec:
chart: metal3
version: 0.12.7
timeout: 30m
targetNamespace: metal3-system
values:
global:
additionalTrustedCAs: true # results in having the chart use the metal3-system/tls-ca-additional Secret which is produced by metal3-sylva-ca-init unit
ironicIP: \'{{ .Values.display_external_ip }}\'
provisioningInterface: eth0
metal3-ironic:
images:
ironicIPADownloader:
repository: registry.opensuse.org/isv/suse/edge/containers/images/ironic-ipa-downloader-x86_64
fullnameOverride: metal3-ironic
service:
type: LoadBalancer
annotations: \'{{ .Values._internal.lb_service_annotations | default dict | include "preserve-type" }}\'
baremetaloperator:
ironichostNetwork: false
persistence:
ironic:
size: 10Gi
storageClass: \'{{ .Values._internal.default_storage_class }}\'
accessMode: ReadWriteOnce
metal3-baremetal-operator:
replicaCount: \'{{ .Values._internal.ha_cluster.is_ha | ternary 2 1 | include "preserve-type" }}\'
_postRenderers:
- kustomize:
patches:
- patch: |
kind: Service
apiVersion: v1
metadata:
name: metal3-ironic
spec:
type: LoadBalancer
loadBalancerClass: \'{{ .Values._internal.loadBalancerClass }}\'
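# the post-renderer above patches the metal3-ironic Service so that it carries the loadBalancerClass
# configured in .Values._internal.loadBalancerClass (the chart values above only set its type to LoadBalancer)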
metal3-pdb:
info:
description: adds a PDB to baremetal-operator pods
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "metal3" | include "unit-enabled" }}\'
- \'{{ .Values._internal.ha_cluster.is_ha }}\'
depends_on:
namespace-defs: true
kyverno: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/metal3-pdb
wait: true
local-path-provisioner:
info:
description: installs local-path CSI
maturity: stable
repo: local-path-provisioner
unit_templates:
- base-deps
helmrelease_spec:
chart:
spec:
chart: deploy/chart/local-path-provisioner
targetNamespace: kube-system
install:
createNamespace: true
values:
storageClass:
defaultClass: \'{{ .Values._internal.default_storage_class | eq "local-path" | include "as-bool" }}\'
nodePathMap:
- node: DEFAULT_PATH_FOR_NON_LISTED_NODES
paths:
- /var/lib/kubelet/local-path-provisioner
helperImage:
repository: docker.io/library/busybox
tag: 1.37.0
cluster-bmh:
info:
description: definitions for Cluster API BareMetalHosts resources (capm3)
maturity: core-component
labels:
sylva-units/protected: ""
repo: sylva-capi-cluster
helm_chart_artifact_name: sylva-capi-cluster
unit_templates:
- base-deps
enabled_conditions:
- \'{{ .Values.cluster.capi_providers.infra_provider | eq "capm3" }}\'
#depends_on: the dependency on metal3 is handled in management.values.yaml
# and workload-cluster.values.yaml
kustomization_spec:
prune: \'{{ not (eq .Release.Namespace "sylva-system") | include "preserve-type" }}\'
helmrelease_spec:
driftDetection:
# The \'online\' parameter is managed by BareMetal and Metal3 operators,
# while \'automatedCleaningMode\' is managed by a Kyverno policy (https://gitlab.com/sylva-projects/sylva-core/-/merge_requests/3073).
# Therefore, drift detection should ignore these fields.
ignore:
- target:
kind: BareMetalHost
paths:
- /spec/online
- /spec/automatedCleaningMode
- target:
kind: Secret
paths:
- /metadata/labels/environment.metal3.io
targetNamespace: \'{{ .Release.Namespace }}\'
chart:
spec:
chart: charts/sylva-capi-cluster
values:
resource_groups:
- baremetal-hosts
# we pass everything that is under `cluster` to this unit that uses sylva-capi-cluster chart
# (we do it via a secret because some of the values are credentials in many scenarios)
helm_secret_values: \'{{ .Values.cluster | include "preserve-type" }}\'
cluster-node-provider-id-blacklist:
info:
description: ValidatingAdmissionPolicy to prevent nodes from being recreated with a providerID that has already been used
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "misc-controllers-suite" | include "unit-enabled" }}\'
- \'{{ .Values.cluster.capi_providers.infra_provider | eq "capm3" }}\'
depends_on:
misc-controllers-suite: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kyverno-policies/cluster-node-provider-id-blacklist
wait: true
rke2-helmchart-prevent-uninstall:
info:
description: Kyverno policy to prevent key Helm charts from being uninstalled by RKE2 HelmChart controller
internal: true
unit_templates:
- \'{{ tuple "base-deps" (not .Values._internal.state.is_upgrade ) | include "set-only-if" }}\'
enabled_conditions:
- \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" }}\'
depends_on:
kyverno: true
# on an upgrade we don\'t want this unit to be applied after \'cluster\' unit reconciliation,
# because we want the policy to be active during node rolling update
# but on a fresh install, we setup this policy only after \'cluster\' unit (the first
# RKE2 installation installs the RKE2 HelmChart CRD):
cluster: \'{{ and (not .Values._internal.state.is_upgrade)
(tuple . "cluster" | include "unit-enabled") }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kyverno-policies/rke2-helmchart-prevent-uninstall
wait: true
cluster-maxunavailable-apply:
info:
description: ensures that cluster-maxunavailable is effective before updating the \'cluster\' unit
internal: true
unit_templates:
- kube-job
enabled_conditions: # we enable this unit only if we transition from "cluster-maxunavailable not used"
# to "cluster-maxunavailable used", because it\'s only in that case that we
# need to ensure that all Machines have the pre-drain hook before the cluster unit
# is updated
# cluster-maxunavailable functionality is wanted:
- \'{{ hasKey (.Values.cluster | dig "annotations" "cluster" dict) "cluster-maxunavailable.sylva.org/enabled" }}\'
# cluster-maxunavailable functionality is not yet setup:
- >-
{{- if .Values._internal.state.is_upgrade -}}
{{- lookup "cluster.x-k8s.io/v1beta1" "Cluster" .Release.Namespace .Values.cluster.name
| dig "metadata" "annotations" "cluster-maxunavailable.sylva.org/enabled" "-unset-"
| eq "-unset-" -}}
{{- end -}}
kustomization_spec:
_patches:
- \'{{ include "kube-job-add-env-var-patch" (dict
"CLUSTER_NAME" .Values.cluster.name
)}}\'
- \'{{ include "kube-job-replace-script-patch" (.Files.Get "scripts/cluster-apply-cluster-maxunavailable.sh") }}\'
cluster:
info:
description: holds the Cluster API definition for the cluster
maturity: core-component
labels:
sylva-units/protected: ""
repo: sylva-capi-cluster
unit_templates: []
helm_chart_artifact_name: sylva-capi-cluster
depends_on:
# see management.values.yaml and workload-cluster.values.yaml to see how
# we handle the fact that this unit depends on the CAPI-related components being ready
os-images-info: \'{{ list "capo" "capm3" | has .Values.cluster.capi_providers.infra_provider }}\'
capo-cluster-resources: \'{{ .Values.cluster.capi_providers.infra_provider | eq "capo" }}\'
capo-network-settings: \'{{ .Values.cluster.capi_providers.infra_provider | eq "capo" }}\'
sync-openstack-images: \'{{ .Values.cluster.capi_providers.infra_provider | eq "capo" }}\'
cluster-bmh: \'{{ .Values.cluster.capi_providers.infra_provider | eq "capm3" }}\'
cluster-node-provider-id-blacklist: \'{{ tuple . "cluster-node-provider-id-blacklist" | include "unit-enabled" }}\'
longhorn-instance-manager-cleanup: \'{{ tuple . "longhorn-instance-manager-cleanup" | include "unit-enabled" }}\'
rke2-helmchart-prevent-uninstall: \'{{ and (tuple . "rke2-helmchart-prevent-uninstall" | include "unit-enabled") .Values._internal.state.is_upgrade }}\'
metallb: \'{{ and (tuple . "metallb" | include "unit-enabled") .Values._internal.state.is_upgrade }}\'
calico: \'{{ .Values._internal.state.is_upgrade }}\'
calico-ready: \'{{ .Values._internal.state.is_upgrade }}\'
cluster-garbage-collector: \'{{ and (tuple . "cluster-garbage-collector" | include "unit-enabled") .Values._internal.state.is_upgrade }}\'
misc-controllers-suite: \'{{ and (tuple . "misc-controllers-suite" | include "unit-enabled") .Values._internal.state.is_upgrade }}\'
cluster-maxunavailable-apply: \'{{ tuple . "cluster-maxunavailable-apply" | include "unit-enabled" }}\'
longhorn-update-stale-replica-timeout: \'{{ and (tuple . "longhorn-update-stale-replica-timeout" | include "unit-enabled") .Values._internal.state.is_upgrade }}\'
annotations:
sylvactl/unitTimeout: \'{{ printf "%dm" ( include "cluster-unit-timeout" . | int) }}\'
kustomization_spec:
prune: \'{{ not (eq .Release.Namespace "sylva-system") | include "preserve-type" }}\'
# we wait on all important resources built by sylva-capi-cluster,
# *except* the MachineDeployments, since if we\'re using kubeadm as bootstrap
# we would have a deadlock: the default CNI unit would not deploy
# before the cluster unit is ready, and the cluster would not be ready until
# the CNI is deployed because the MachineDeployment nodes need the CNI to become
# ready (for the controlplane nodes, the kubeadm controlplane provider ignores that)
healthChecks: >-
{{
(include "cluster-healthchecks" (dict "ns" .Release.Namespace "cluster" .Values.cluster "includeMDs" false "onlyCheckKubeConfig" (.Values.cluster.capi_providers.bootstrap_provider | eq "cabpck")) | fromYaml).result
| include "preserve-type"
}}
helmrelease_spec:
uninstall:
timeout: \'{{ mul .Values.cluster.timeouts.cluster_delete_hook_job_timeout 1.2 }}s\'
targetNamespace: \'{{ .Release.Namespace }}\'
chart:
spec:
chart: charts/sylva-capi-cluster
values:
resource_groups:
- cluster-resource
- base
unset_cluster_paused: \'{{ .Release.IsUpgrade | include "preserve-type" }}\'
freeze_first_node_files: \'{{ .Values._internal.state.is_upgrade | include "preserve-type" }}\'
valuesFrom:
- kind: ConfigMap
name: capo-cluster-resources
valuesKey: allocated_ip
targetPath: cluster_virtual_ip
optional: \'{{ not (.Values.cluster.capi_providers.infra_provider | eq "capo") | include "as-bool" }}\'
- kind: ConfigMap
name: capo-cluster-resources
valuesKey: allocated_fip
targetPath: cluster_public_ip
optional: \'{{ not (.Values.cluster.capi_providers.infra_provider | eq "capo") | include "as-bool" }}\'
- kind: ConfigMap
name: capo-cluster-resources
valuesKey: control_plane_servergroup_id
targetPath: control_plane.capo.server_group_id
optional: \'{{ not (.Values.cluster.capi_providers.infra_provider | eq "capo") | include "as-bool" }}\'
- kind: ConfigMap
name: capo-cluster-resources
valuesKey: worker_servergroup_id
targetPath: machine_deployment_default.capo.server_group_id
optional: \'{{ not (.Values.cluster.capi_providers.infra_provider | eq "capo") | include "as-bool" }}\'
- kind: ConfigMap
name: openstack-images-uuids ## this ConfigMap is a byproduct of the sync-openstack-images job
optional: \'{{ not (.Values.cluster.capi_providers.infra_provider | eq "capo") | include "as-bool" }}\'
# os-images-info ConfigMap
#
# this ConfigMap is a byproduct of the os-images-info unit
#
# it is used for capo and capm3
#
# for capm3 workload clusters it is important to have it as an override of the kyverno-cloned-os-images-info-capm3
# ConfigMap: if an image with the same image key exists in both the workload and mgmt contexts but with different
# content, the workload cluster sylva-capi-cluster chart must receive the one coming from the os-images-info
# ConfigMap computed by the workload cluster sylva-units Helm release, based on the values of the workload cluster
- |
{{-
tuple
(dict "kind" "ConfigMap"
"name" "os-images-info")
(or (.Values.cluster.capi_providers.infra_provider | eq "capm3")
(.Values.cluster.capi_providers.infra_provider | eq "capo")
)
| include "set-only-if"
-}}
# ConfigMap for capm3_os_image_server_images
# this ConfigMap is a byproduct of the os-image-server unit (from mgmt cluster)
#
# it is used for capm3 only
#
# for the mgmt cluster, the ConfigMap is the capm3-os-image-server-os-images ConfigMap produced by the os-image-server unit
# for workload clusters, this ConfigMap is a Kyverno-cloned copy, in each cluster namespace, of the output ConfigMap of the os-image-server unit
# which reflects what the os-image-server unit in mgmt context is currently serving
- |
{{-
tuple
(dict "kind" "ConfigMap"
"name" .Values._internal.os_image_server_images_configmap)
(.Values.cluster.capi_providers.infra_provider | eq "capm3")
| include "set-only-if"
-}}
# ConfigMaps to inject allocated_ip and mtu in Calico configuration
#
# only used for capo
# byproduct of the capo-cluster-resources and capo-network-settings units
- |
{{-
tuple
(dict "kind" "ConfigMap"
"name" "capo-cluster-resources"
"valuesKey" "allocated_ip"
"targetPath" "cni.calico.helm_values.installation.calicoNetwork.nodeAddressAutodetectionV4.canReach"
)
.Values._internal.capo_calico_autodetection_method_use_canReach_vip
| include "set-only-if"
-}}
- |
{{-
tuple
(dict "kind" "ConfigMap"
"name" "capo-network-mtu"
"valuesKey" "calico_mtu"
"targetPath" "cni.calico.helm_values.installation.calicoNetwork.mtu"
)
(eq .Values.cluster.capi_providers.infra_provider "capo")
| include "set-only-if"
-}}
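# two templating patterns are used in the valuesFrom list above:
# - \'optional\' is set to true whenever the infra provider is not capo, so that the HelmRelease
#   does not fail when the referenced ConfigMap legitimately does not exist
# - the "set-only-if" helper (as used throughout this file) emits its first argument only when the
#   accompanying condition is true, so the whole entry is dropped for non-matching providers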
# we pass everything that is under `cluster` to this unit, which uses the sylva-capi-cluster chart
# (we do it via a secret because some of the values are credentials in many scenarios)
helm_secret_values: \'{{ .Values.cluster | include "preserve-type" }}\'
cluster-garbage-collector:
info:
description: installs a cronjob responsible for cleaning unused CAPI resources
internal: true
enabled: false # until https://gitlab.com/sylva-projects/sylva-core/-/issues/2820 is fully fixed
unit_templates:
- base-deps
- kube-cronjob
depends_on:
cluster: false # we can\'t depend directly on \'cluster\' unit, since it\'s being disabled in \'management-sylva-units\' and re-enabled by \'pivot\'
capi: true
capd: \'{{ tuple . "capd" | include "unit-enabled" }}\'
capv: \'{{ tuple . "capv" | include "unit-enabled" }}\'
capo: \'{{ tuple . "capo" | include "unit-enabled" }}\'
capm3: \'{{ tuple . "capm3" | include "unit-enabled" }}\'
kustomization_spec:
path: ./kustomize-units/cluster-garbage-collector
_components:
- \'{{ tuple "components/dev-ci-cronjob-schedule" (list "dev" "ci" | has .Values.env_type) | include "set-only-if" }}\'
cluster-ready:
info:
description: unit to check readiness of cluster CAPI objects
details: the healthChecks on this unit complement those done in the \'cluster\' unit, which in some cases can\'t cover all CAPI resources
internal: true
unit_templates:
- dummy
enabled_conditions:
- \'{{ tuple . "cluster" | include "unit-enabled" }}\'
depends_on:
cluster: true
annotations:
sylvactl/unitTimeout: \'{{ printf "%dm" ( include "cluster-unit-timeout" . | int) }}\'
sylvactl/timeoutReference: "Kustomization/{{ .Release.Namespace }}/cluster"
kustomization_spec:
healthChecks: >-
{{
(include "cluster-healthchecks" (dict "ns" .Release.Namespace "cluster" .Values.cluster) | fromYaml).result
| include "preserve-type"
}}
cluster-reachable:
info:
internal: true
description: ensures that created clusters are reachable, and makes failures a bit more explicit when that is not the case
details: >
This unit will be enabled in bootstrap cluster to check connectivity to management cluster
and in various workload-cluster namespaces in management cluster to check connectivity to workload clusters
unit_templates:
- dummy
enabled: false
depends_on:
# cluster-reachable depends on cluster only on first installation
cluster: \'{{ not .Values._internal.state.is_upgrade }}\'
annotations:
sylvactl/unitTimeout: 15m
kustomization_spec:
targetNamespace: default
kubeConfig:
secretRef:
name: \'{{ .Values.cluster.name }}-kubeconfig\'
_components:
- ../configmap-component
postBuild:
substitute:
UNIT_NAME: cluster-reachable
wait: true
cluster-machines-ready:
info:
description: unit used to wait for all CAPI resources to be ready
details: |
This unit is here so that activity on all units is held off until all the CAPI resources are ready.
This is a distinct unit from \'cluster-ready\' because the readiness criteria are different: here
we not only want the cluster to be ready to host some workload (which only requires some CAPI resources
to be ready), we also want all CAPI resources to be ready.
internal: true
enabled_conditions:
- \'{{ tuple . "cluster" | include "unit-enabled" }}\'
depends_on:
# for kubeadm, calico is installed by Flux and Machines don\'t become ready until calico is installed
# so we need to have cluster-machines-ready depend on \'calico\'
# (this is relevant only on installation, on upgrades calico is already installed)
\'{{ .Values._internal.calico_readiness_unit }}\': \'{{ and (.Values.cluster.capi_providers.bootstrap_provider | eq "cabpk") (not .Values._internal.state.is_upgrade) }}\'
cluster: true
cluster-ready: true # no need to start waiting for anything until the cluster is ready
opennebula-cpi: \'{{ .Values.cluster.capi_providers.infra_provider | eq "capone" }}\'
vsphere-cpi: \'{{ .Values.cluster.capi_providers.infra_provider | eq "capv" }}\'
unit_templates:
- kube-job
annotations:
sylvactl/unitTimeout: \'{{ printf "%dm" (add (include "cluster-unit-timeout" . | int) 10) }}\'
sylvactl/timeoutReference: "Kustomization/{{ .Release.Namespace }}/cluster"
kustomization_spec:
_patches:
- \'{{ include "kube-job-add-env-var-patch" (dict
"WAIT_TIMEOUT" .Values.cluster_machines_ready.wait_timeout
"CLUSTER_NAME" .Values.cluster.name
"CONTROL_PLANE" .Values._internal.controlplane_kind
"BOOTSTRAP_PROVIDER" .Values.cluster.capi_providers.bootstrap_provider
)}}\'
- \'{{ include "kube-job-replace-script-patch" (.Files.Get "scripts/cluster-machines-ready.sh") }}\'
postBuild:
substitute:
POD_ACTIVE_DEADLINE_SECONDS: "1800" # if a check takes more than 30 minutes to finalize,
# then it\'s better to start a new one, as the first one might be stuck,
# and it\'s harmless to do a wait check again
heat-operator:
info:
description: installs OpenStack Heat operator
maturity: core-component
unit_templates:
- base-deps
enabled_conditions:
- \'{{ .Values.cluster.capi_providers.infra_provider | eq "capo" }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/heat-operator
wait: true
sylva-units-operator:
info:
description: installs sylva-units operator
maturity: experimental
unit_templates:
- base-deps
repo: sylva-core
depends_on:
flux-system: true
kustomization_spec:
path: ./kustomize-units/sylva-units-operator
wait: true
workload-cluster-operator:
info:
description: installs Sylva operator for managing workload clusters
maturity: experimental
unit_templates:
- base-deps
repo: sylva-core
depends_on:
sylva-units-operator: \'{{ tuple . "sylva-units-operator" | include "unit-enabled" }}\'
external-secrets-operator: \'{{ tuple . "external-secrets-operator" | include "unit-enabled" }}\'
kustomization_spec:
path: ./kustomize-units/workload-cluster-operator
wait: true
misc-controllers-suite:
info:
description: Sylva controllers from [misc-controllers-suite project](https://gitlab.com/sylva-projects/sylva-elements/misc-controllers-suite)
unit_templates:
- base-deps
repo: sylva-core
kustomization_spec:
postBuild:
substitute:
# ENABLE_CLUSTER_MAXUNAVAILABLE: "true" # only in management.values.yaml
ENABLE_PROVIDER_ID_BLACKLIST: "true"
path: ./kustomize-units/misc-controllers-suite
wait: true
capo-cloud-config:
info:
description: creates CAPO cloud-config used to produce Heat stack
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ .Values.cluster.capi_providers.infra_provider | eq "capo" }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/capo-cluster-resources/cloud-config
wait: true
targetNamespace: \'{{ .Release.Namespace }}\'
kustomization_substitute_secrets:
CAPO_CLOUD_YAML: \'{{ .Values.cluster.capo.clouds_yaml | toYaml | b64enc }}\'
CAPO_CACERT: \'{{ (.Values.cluster.capo.cacert|default "") | b64enc }}\'
capo-cluster-resources:
info:
description: installs OpenStack Heat stack for CAPO cluster prerequisites
internal: true
labels:
sylva-units/protected: ""
unit_templates:
- base-deps
enabled_conditions:
- \'{{ .Values.cluster.capi_providers.infra_provider | eq "capo" }}\'
repo: sylva-core
depends_on:
capo-cloud-config: true
heat-operator: true
kustomization_spec:
path: ./kustomize-units/capo-cluster-resources/heat-stack
wait: true
targetNamespace: \'{{ .Release.Namespace }}\'
postBuild:
substitute:
STACK_NAME_PREFIX: \'{{ .Values.cluster.name }}-{{ tuple . .Values.cluster.capo.resources_tag | include "interpret-as-string" | replace "." "-" }}\'
CAPO_TAG: \'{{ .Values.cluster.capo.resources_tag }}\'
CAPO_NETWORK_ID: \'{{ .Values.cluster.capo.network_id }}\'
CONTROL_PLANE_AFFINITY_POLICY: \'{{ .Values.openstack.control_plane_affinity_policy }}\'
WORKER_AFFINITY_POLICY: \'{{ .Values.openstack.worker_affinity_policy }}\'
CAPO_EXTERNAL_NETWORK_ID: \'{{ tuple .Values.openstack.external_network_id .Values.openstack.external_network_id | include "set-only-if" }}\'
CONTROL_PLANE_SECURITY_GROUP_NAME: \'capo-{{ .Values.cluster.name }}-security-group-ctrl-plane-{{ .Values.cluster.capo.resources_tag }}\'
WORKER_SECURITY_GROUP_NAME: \'capo-{{ .Values.cluster.name }}-security-group-workers-{{ .Values.cluster.capo.resources_tag }}\'
CAPO_CREATE_IRONIC_SECURITY_GROUP: \'{{ tuple . (and (tuple . "metal3" | include "unit-enabled") (.Values.cluster.capi_providers.infra_provider | eq "capo")) "true" "false" | include "interpret-ternary" }}\'
COMMON_SECURITY_GROUP_NAME: \'capo-{{ .Values.cluster.name }}-security-group-common-{{ .Values.cluster.capo.resources_tag }}\'
DUMMY_UPDATE_TRIGGER: \'helm-release-revision/{{ .Release.Revision }},sylva-units-version/{{ .Chart.Version }}\'
CLUSTER_VIRTUAL_IP_PORT_UUID: \'{{ .Values.openstack.cluster_virtual_ip_port_uuid | default "" }}\'
CLUSTER_FLOATING_IP_UUID: \'{{ .Values.openstack.cluster_floating_ip_uuid | default "" }}\'
capo-network-settings:
info:
description: ConfigMap that contains the network settings for CAPO
internal: true
labels:
sylva-units/protected: ""
unit_templates:
- kube-job
enabled_conditions:
- \'{{ .Values.cluster.capi_providers.infra_provider | eq "capo" }}\'
depends_on:
capo-cloud-config: true
kustomization_spec:
_patches:
- \'{{ include "kube-job-add-env-var-patch" (dict
"CALICO_ENCAPSULATION_OVERHEAD" .Values._internal.calico_encapsulation_overhead
"RELEASE_NAMESPACE" .Release.Namespace
) }}\'
- \'{{ include "kube-job-replace-script-patch" (.Files.Get "scripts/capo-network-settings.sh") }}\'
calico-crd:
info:
description: installs Calico CRDs
maturity: stable
hidden: true
labels:
sylva-units/protected: ""
unit_templates:
- \'{{ tuple "base-deps" (not .Values._internal.state.is_upgrade) | include "set-only-if" }}\'
enabled_conditions:
- \'{{ tuple . "calico" | include "unit-enabled" }}\'
depends_on:
namespace-defs: true
helm_repo_url: https://rke2-charts.rancher.io
helm_chart_versions:
v3.30.100: >-
{{ include "k8s-version-match" (tuple ">=1.30.0,<1.31.0" .Values._internal.k8s_version) }}
v3.30.300: >-
{{ include "k8s-version-match" (tuple ">=1.31.0" .Values._internal.k8s_version) }}
helmrelease_spec:
releaseName: rke2-calico-crd
targetNamespace: kube-system
chart:
spec:
chart: rke2-calico-crd
version: "" # will be defined by helm_chart_versions
kustomization_spec:
prune: false
calico:
info:
description: installs Calico CNI
maturity: stable
labels:
sylva-units/protected: ""
unit_templates:
- \'{{ tuple "base-deps" (not .Values._internal.state.is_upgrade) | include "set-only-if" }}\'
depends_on:
calico-crd: true
capo-network-settings: \'{{ tuple . "capo-network-settings" | include "unit-enabled" }}\'
helm_repo_url: https://rke2-charts.rancher.io
helm_chart_versions:
v3.30.100: >-
{{ include "k8s-version-match" (tuple ">=1.30.0,<1.31.0" .Values._internal.k8s_version) }}
v3.30.300: >-
{{ include "k8s-version-match" (tuple ">=1.31.0" .Values._internal.k8s_version) }}
helmrelease_spec:
# Setting drift detection mode to warn for calico until https://gitlab.com/sylva-projects/sylva-core/-/issues/1814 is solved.
driftDetection:
mode: warn
releaseName: \'{{ list "cabpk" "cabpck" | has .Values.cluster.capi_providers.bootstrap_provider | ternary "calico" "rke2-calico" }}\'
targetNamespace: \'{{ list "cabpk" "cabpck" | has .Values.cluster.capi_providers.bootstrap_provider | ternary "sylva-system" "kube-system" }}\'
chart:
spec:
chart: rke2-calico
version: "" # will be defined by helm_chart_versions
values: \'{{ .Values.calico_helm_values | include "preserve-type" }}\'
valuesFrom:
- |
{{- tuple
(dict "kind" "ConfigMap"
"name" "capo-network-mtu"
"valuesKey" "calico_mtu"
"targetPath" "installation.calicoNetwork.mtu"
)
(eq .Values.cluster.capi_providers.infra_provider "capo")
| include "set-only-if"
-}}
# For Calico nodeAddressAutodetection, for RKE2 on CAPO, we need to pass the cluster VIP in calico values
# as installation.calicoNetwork.nodeAddressAutodetectionV4.canReach
#
# this address is read from the ConfigMap produced by capo-cluster-resources unit
# which allocates the VIP
- |
{{- tuple
(dict "kind" "ConfigMap"
"name" "capo-cluster-resources"
"valuesKey" "allocated_ip"
"targetPath" "installation.calicoNetwork.nodeAddressAutodetectionV4.canReach"
)
.Values._internal.capo_calico_autodetection_method_use_canReach_vip
| include "set-only-if"
-}}
calico-ready:
info:
internal: true
description: ensures Calico resources created by the Tigera operator are ready before running further steps
details: >
This unit will be enabled in bootstrap cluster to confirm management cluster CNI readiness
and in various workload-cluster namespaces in management cluster to do the same for workload clusters
maturity: beta
enabled: false # disabled in management cluster only for initial installation, see management.values.yaml
enabled_conditions:
- \'{{ tuple . "calico" | include "unit-enabled" }}\'
unit_templates:
- dummy
depends_on:
calico: true
annotations:
sylvactl/unitTimeout: \'{{ printf "%dm" ( include "cluster-unit-timeout" . | int) }}\'
kustomization_spec:
targetNamespace: default
kubeConfig:
secretRef:
name: \'{{ .Values.cluster.name }}-kubeconfig\'
healthChecks:
- apiVersion: operator.tigera.io/v1
kind: Installation
name: default
metallb:
info:
description: installs MetalLB operator
maturity: stable
labels:
sylva-units/protected: ""
unit_templates:
- base-deps
enabled_conditions:
- \'{{ or (.Values.cluster.capi_providers.infra_provider | eq "capd")
(.Values.cluster.capi_providers.bootstrap_provider | eq "cabpr") }}\'
depends_on:
monitoring-crd: \'{{ tuple . "monitoring-crd" | include "unit-enabled" }}\'
\'{{ .Values._internal.calico_readiness_unit }}\': \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpk" }}\'
cluster-vip: \'{{ and (tuple . "cluster-vip" | include "unit-enabled") .Values._internal.state.is_upgrade }}\'
helm_repo_url: https://metallb.github.io/metallb
helmrelease_spec:
driftDetection:
ignore:
- target:
kind: CustomResourceDefinition
name: bgppeers.metallb.io
paths:
- /spec/conversion/webhook/clientConfig/caBundle
chart:
spec:
chart: metallb
version: 0.15.2
targetNamespace: metallb-system
install:
createNamespace: true
values: \'{{ .Values.metallb_helm_values | include "preserve-type" }}\'
metallb-resources:
info:
description: configures metallb resources
internal: true
labels:
sylva-units/protected: ""
enabled_conditions:
- \'{{ tuple . "metallb" | include "unit-enabled" }}\'
unit_templates:
- base-deps
annotations:
sylvactl/unitTimeout: \'{{ printf "%dm" ( include "cluster-unit-timeout" . | int) }}\'
depends_on:
metallb: \'{{ not .Values._internal.state.is_upgrade }}\'
repo: metallb-resources
helm_chart_artifact_name: metallb-resources
helmrelease_spec:
chart:
spec:
chart: .
targetNamespace: metallb-system
valuesFrom:
# for capo the cluster VIP has to be taken from capo-cluster-resources ConfigMap (and isn\'t included in .Values._internal.metallb above)
- kind: ConfigMap
name: capo-cluster-resources
valuesKey: allocated_ip
targetPath: cluster_virtual_ip # will result in the creation of the \'lbpool\' IPAddressPool
optional: \'{{ not (.Values.cluster.capi_providers.infra_provider | eq "capo") | include "as-bool" }}\'
helm_secret_values: |-
{{- $resources := dict }}
{{- tuple $resources .Values.metallb .Values._internal.metallb | include "merge-append" }}
{{ $resources | include "preserve-type" }}
cinder-csi:
info:
description: installs OpenStack Cinder CSI
maturity: stable
unit_templates:
- base-deps
enabled_conditions:
- \'{{ .Values.cluster.capi_providers.infra_provider | eq "capo" }}\'
depends_on:
namespace-defs: true
helm_repo_url: https://kubernetes.github.io/cloud-provider-openstack
helmrelease_spec:
chart:
spec:
chart: openstack-cinder-csi
version: 2.33.1
targetNamespace: cinder-csi
install:
createNamespace: false
values:
clusterID: \'{{ .Values.cluster.capo.resources_tag }}\'
storageClass:
enabled: false
delete:
isDefault: false
allowVolumeExpansion: true
retain:
isDefault: false
allowVolumeExpansion: true
custom: |-
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: "{{ .Values.openstack.storageClass.name }}"
annotations:
storageclass.kubernetes.io/is-default-class: "{{ eq .Values._internal.default_storage_class .Values.openstack.storageClass.name }}"
provisioner: cinder.csi.openstack.org
volumeBindingMode: Immediate
reclaimPolicy: Delete
allowVolumeExpansion: true
parameters:
type: "{{ .Values.openstack.storageClass.type }}"
_postRenderers:
- kustomize:
patches:
- target:
kind: Deployment
name: openstack-cinder-csi-controllerplugin
patch: |
kind: Deployment
metadata:
name: openstack-cinder-csi-controllerplugin
spec:
template:
metadata:
annotations:
clouds-yaml-hash: \'{{ .Values._internal.clouds_yaml_hash }}\'
helm_secret_values:
secret:
enabled: "true"
create: "true"
name: cinder-csi-cloud-config
data:
cloud.conf: |-
{{- if .Values.cluster.capi_providers.infra_provider | eq "capo" -}}
[Global]
auth-url = {{ .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.auth.auth_url | quote }}
tenant-name = {{ .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.auth.project_name | quote }}
domain-name = {{ .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.auth.user_domain_name | quote }}
username = {{ .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.auth.username | quote }}
password = {{ .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.auth.password | quote }}
region = {{ .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.region_name | quote }}
tls-insecure = {{ not .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.verify }}
[BlockStorage]
ignore-volume-az = true
{{- end -}}
synchronize-secrets:
info:
description: allows secrets from Vault to be consumed by other units, relies on ExternalSecrets
internal: true
unit_templates:
- base-deps
depends_on:
eso-secret-stores: true
vault-secrets: true
keycloak-init: \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/synchronize-secrets
_components:
- \'{{ tuple "components/keycloak" (tuple . "keycloak" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/rancher" (tuple . "rancher" | include "unit-enabled") | include "set-only-if" }}\'
postBuild:
substitute:
FLUX_ADMIN_USERNAME: \'{{ .Values.flux_webui.admin_user }}\'
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
healthChecks:
- apiVersion: v1
kind: Secret
name: cluster-user-auth
namespace: flux-system
- apiVersion: v1
kind: Secret
name: cluster-creator-secret
namespace: sylva-system
- >-
{{ tuple (dict
"apiVersion" "v1"
"kind" "Secret"
"name" "rancher-bootstrap-secret"
"namespace" "cattle-system"
)
(tuple . "rancher" | include "unit-enabled")
| include "set-only-if"
}}
- >-
{{ tuple (dict
"apiVersion" "v1"
"kind" "Secret"
"name" "keycloak-bootstrap-admin"
"namespace" "keycloak"
)
(tuple . "keycloak" | include "unit-enabled")
| include "set-only-if"
}}
rancher-init:
info:
description: initializes and configures Rancher
maturity: beta
internal: true
enabled_conditions:
- \'{{ tuple . "rancher" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
sylva-ca: true
external-secrets-operator: true
ingress-nginx: \'{{ tuple . "ingress-nginx" | include "unit-enabled" }}\'
repo: sylva-core
kustomization_substitute_secrets:
KEY: \'{{ .Values.external_certificates.rancher.key | default "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/rancher-init
targetNamespace: cattle-system
wait: true
postBuild:
substitute:
SERVICE: rancher
SERVICE_DNS: \'{{ .Values.external_hostnames.rancher }}\'
CERT: \'{{ .Values.external_certificates.rancher.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
_components:
- \'{{ tuple "components/webhook-ha" (.Values._internal.ha_cluster.is_ha) | include "set-only-if" }}\'
- \'{{ ternary "../tls-components/tls-secret" "../tls-components/tls-certificate" (hasKey .Values.external_certificates.rancher "cert") }}\'
- "../tls-components/sylva-ca"
rancher:
info:
description: installs Rancher
maturity: stable
unit_templates:
- base-deps
depends_on:
namespace-defs: true
cert-manager: true
k8s-gateway: true
rancher-init: true
ingress-nginx: \'{{ tuple . "ingress-nginx" | include "unit-enabled" }}\'
synchronize-secrets: true
annotations:
sylvactl/readyMessage: "Rancher UI can be reached at https://{{ .Values.external_hostnames.rancher }} ({{ .Values._internal.display_external_ip_msg }})"
sylvactl/unitTimeout: 10m
helm_repo_url: https://releases.rancher.com/server-charts/stable
helmrelease_spec:
chart:
spec:
chart: rancher
# When changing the major or minor version of rancher, make sure that renovate is
# also updated to accept the corresponding versions of the other rancher charts (monitoring, sriov, longhorn, backup)
# See https://github.com/rancher/charts/blob/dev-v2.9/README.md
version: 2.11.3
targetNamespace: cattle-system
values:
additionalTrustedCAs: true
auditLog:
level: \'{{ .Values.audit_log.level | include "preserve-type" }}\'
privateCA: true
useBundledSystemChart: true
hostname: \'{{ .Values.external_hostnames.rancher }}\'
ingress:
enabled: true
ingressClassName: nginx
tls:
source: secret
secretName: rancher-tls
extraAnnotations:
nginx.ingress.kubernetes.io/proxy-body-size: 8m
# restrictedAdmin: true
# a negative value will deploy from 1 to abs(replicas) replicas, depending on the number of available nodes
replicas: -3
features: embedded-cluster-api=false,provisioningv2=true,managed-system-upgrade-controller=false
proxy: \'{{ get .Values.proxies "https_proxy" }}\'
noProxy: \'{{ include "sylva-units.no_proxy" (tuple .) }}\'
postDelete:
namespaceList:
- cattle-fleet-system
- rancher-operator-system
extraEnv:
- name: CATTLE_BOOTSTRAP_PASSWORD
valueFrom:
secretKeyRef:
name: rancher-bootstrap-secret
key: bootstrapPassword
_postRenderers:
- kustomize:
patches:
- patch: |-
kind: Deployment
apiVersion: apps/v1
metadata:
name: rancher
spec:
template:
spec:
volumes:
- name: tls-ca-additional-volume
secret:
defaultMode: 256
items:
- key: ca.crt
path: ca-additional.pem
secretName: sylva-ca.crt
- name: tls-ca-volume
secret:
defaultMode: 256
secretName: rancher-tls
items:
- key: ca.crt
path: cacerts.pem
# this is to avoid that the too-short default liveness probe
# prevents the Rancher installation from finishing before the pod is killed:
containers:
- name: rancher
livenessProbe:
initialDelaySeconds: 120
periodSeconds: 30
failureThreshold: 20
kustomization_spec:
# these healthChecks are added so that this unit does not become ready before
# a few things that Rancher sets up behind the scenes are ready
healthChecks:
- apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: clusters.provisioning.cattle.io # capi-rancher-import needs this CRD
- apiVersion: apps/v1
kind: Deployment
name: rancher-webhook
namespace: cattle-system
- apiVersion: v1
kind: Service
name: rancher-webhook
namespace: cattle-system
rancher-custom-roles:
info:
description: configures custom roles for Rancher
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "rancher" | include "unit-enabled" }}\'
repo: sylva-core
depends_on:
rancher: true
kustomization_spec:
path: ./kustomize-units/rancher-custom-roles
wait: true
rancher-keycloak-oidc-provider:
info:
description: configures Rancher for Keycloak OIDC integration
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "rancher" | include "unit-enabled" }}\'
- \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
repo: sylva-core
depends_on:
rancher: true
rancher-custom-roles: true
keycloak: true
keycloak-resources: true
keycloak-oidc-external-secrets: true
kustomization_spec:
path: ./kustomize-units/rancher-keycloak-oidc-provider
postBuild:
substitute:
KEYCLOAK_EXTERNAL_URL: \'{{ .Values.external_hostnames.keycloak }}\'
RANCHER_EXTERNAL_URL: \'{{ .Values.external_hostnames.rancher }}\'
AUTH_USER_INFO_MAX_AGE_SECONDS: \'{{ .Values.rancher.auth_user_info_max_age_seconds | quote }}\'
AUTH_USER_INFO_RESYNC_CRON: \'{{ .Values.rancher.auth_user_info_resync_cron | quote }}\'
wait: true
k8s-gateway:
info:
description: installs k8s gateway (coredns + plugin to resolve external service names to ingress IPs)
details: >
this unit is here only to allow DNS resolution of Ingress hosts (FQDNs), used for importing workload clusters into Rancher and for flux-webui to use Keycloak SSO
maturity: stable
unit_templates:
- base-deps
depends_on:
metallb-resources: \'{{ tuple . "metallb" | include "unit-enabled" }}\'
helm_repo_url: https://ori-edge.github.io/k8s_gateway/
helmrelease_spec:
chart:
spec:
chart: k8s-gateway
version: 2.4.0
targetNamespace: k8s-gateway
driftDetection:
mode: enabled
install:
createNamespace: true
upgrade:
force: true
values:
domain: \'{{ .Values.cluster_domain }}\'
replicaCount: 3
service:
annotations: \'{{ .Values._internal.lb_service_annotations | default dict | include "preserve-type" }}\'
# The following extraZonePlugins lines include all chart defaults plus the hosts plugin
extraZonePlugins:
- name: log
- name: errors
- name: health
configBlock: |-
lameduck 5s
- name: ready
- name: prometheus
parameters: 0.0.0.0:9153
- name: forward
parameters: . /etc/resolv.conf
- name: loop
- name: reload
- name: loadbalance
- name: hosts
configBlock: |-
{{- $display_external_ip := .Values.display_external_ip }}
{{- range $name,$domain := .Values.external_hostnames }}
{{ $display_external_ip }} {{ $domain }}
{{- end }}
fallthrough
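# (once rendered, the hosts block above contains one "<display_external_ip> <fqdn>" line per entry
# of external_hostnames, followed by the fallthrough directive, e.g. a hypothetical
# "192.0.2.10 rancher.example.org" line)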
_postRenderers:
- kustomize:
patches:
- patch: |
kind: Service
apiVersion: v1
metadata:
name: k8s-gateway
spec:
type: LoadBalancer
loadBalancerClass: \'{{ .Values._internal.loadBalancerClass }}\'
- patch: |
kind: Deployment
apiVersion: apps/v1
metadata:
name: k8s-gateway
spec:
template:
spec:
containers:
- name: k8s-gateway
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsGroup: 1000
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
cluster-vip:
info:
description: Defines the cluster-vip Service for MetalLB load-balancing
details:
MetalLB will only handle the VIP if it has a corresponding Service with endpoints,
but we don\'t want API access (6443) to rely on kube-proxy, because on RKE2 agent nodes,
kube-proxy uses the RKE2-internal load-balancing proxy, which may fall back to the VIP to access the API,
which could create a deadlock if endpoints are not up-to-date.
The cluster-vip Service is what plays this role. This unit manages this resource, taking over
control after the initial creation of this Service by a cloud-init post command on the first node.
internal: true
labels:
sylva-units/protected: ""
unit_templates:
- base-deps
- kube-job
enabled_conditions:
- \'{{ tuple . "metallb" | include "unit-enabled" }}\'
- \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/cluster-vip
wait: true
prune: false
postBuild:
substitute:
# in the general case, the cluster VIP is taken from .Values.cluster_virtual_ip
# except in the CAPO case where we don\'t know it beforehand in values and we take it
# from capo-cluster-resources ConfigMap (see substituteFrom below)
allocated_ip: \'{{ tuple .Values.cluster_virtual_ip (not (.Values.cluster.capi_providers.infra_provider | eq "capo")) | include "set-only-if" }}\'
lb_class: sylva.org/metallb-class
substituteFrom:
# see explanation above about allocated_ip / cluster_virtual_ip in CAPO case
- kind: ConfigMap
name: capo-cluster-resources
optional: \'{{ not (.Values.cluster.capi_providers.infra_provider | eq "capo") | include "as-bool" }}\'
rancher-turtles:
info:
description: installs the Rancher Turtles operator, which enables the import of Cluster API workload clusters into the management cluster\'s Rancher
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "rancher" | include "unit-enabled" }}\'
depends_on:
rancher: true
k8s-gateway: true
helm_repo_url: https://rancher.github.io/turtles
helmrelease_spec:
install:
disableHooks: true
driftDetection:
ignore:
- target:
kind: ClusterRole
name: rancher-turtles-aggregated-manager-role
paths:
- /rules
chart:
spec:
chart: rancher-turtles
version: 0.22.0
targetNamespace: rancher-turtles-system
values:
rancherTurtles:
features:
agent-tls-mode:
enabled: true
cluster-api-operator:
enabled: false
cleanup: false
cluster-api:
enabled: false
ingress-nginx-init:
info:
description: creates the default certificate for the ingress-nginx controller
internal: true
enabled_conditions:
- \'{{ tuple . "ingress-nginx" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
sylva-ca: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/ingress-nginx-init
postBuild:
substitute:
CLUSTER_IP: "{{ .Values.display_external_ip }}"
healthChecks:
- apiVersion: v1
kind: Secret
name: default-nginx-tls
namespace: kube-system
ingress-nginx:
info:
description: installs Nginx ingress controller
maturity: stable
unit_templates:
- base-deps
annotations:
sylvactl/unitTimeout: \'{{ printf "%dm" ( include "cluster-unit-timeout" . | int) }}\'
depends_on:
ingress-nginx-init: \'{{ tuple . "ingress-nginx-init" | include "unit-enabled" }}\'
metallb-resources: \'{{ tuple . "metallb" | include "unit-enabled" }}\'
\'{{ .Values._internal.calico_readiness_unit }}\': \'{{ tuple . "calico" | include "unit-enabled" }}\'
helm_repo_url: https://rke2-charts.rancher.io
helm_chart_versions:
4.12.401: \'{{ include "k8s-version-match" (tuple ">=1.30.0,<1.31.0" .Values._internal.k8s_version) }}\'
4.12.600: \'{{ include "k8s-version-match" (tuple ">=1.31.0" .Values._internal.k8s_version) }}\'
helmrelease_spec:
releaseName: rke2-ingress-nginx
targetNamespace: kube-system
chart:
spec:
chart: rke2-ingress-nginx
version: "" # will be defined by helm_chart_versions
driftDetection:
mode: enabled
upgrade:
# Increase upgrade timeout as ingress-nginx pods have a longer terminationGracePeriodSeconds,
# with a special case for pods that were deployed with a previous version of sylva where it was set to 300s
timeout: >-
{{- $nginxDs := lookup "apps/v1" "DaemonSet" "kube-system" "rke2-ingress-nginx-controller" -}}
{{- if eq ($nginxDs | dig "spec" "template" "spec" "terminationGracePeriodSeconds" 90) 300 -}}
{{- print (mul 10 ($nginxDs | dig "status" "desiredNumberScheduled" 3)) "m" -}}
{{- else -}}
30m
{{- end -}}
force: true
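# (worked example for the timeout template above: if the DaemonSet still has the legacy 300s
# terminationGracePeriodSeconds and, say, 3 scheduled controller pods, the timeout renders as
# "30m", i.e. 10 minutes per scheduled pod; otherwise the flat 30m default applies)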
values:
fullnameOverride: rke2-ingress-nginx
controller:
# Decrease the default terminationGracePeriodSeconds (of 300s) to speed up upgrades, and tune worker-shutdown-timeout accordingly (default is 240s)
terminationGracePeriodSeconds: 90
config:
worker-processes: 8
worker-shutdown-timeout: 60s
use-forwarded-headers: true
large-client-header-buffers: "4 16k"
# Install controllers only on control plane nodes, otherwise it may take a while to upgrade them successively on clusters with a lot of nodes
nodeSelector:
node-role.kubernetes.io/control-plane: \'{{ .Values._internal.cp_node_label_value }}\'
kind: DaemonSet
service:
loadBalancerClass: \'{{ .Values._internal.loadBalancerClass }}\'
enabled: true
annotations: \'{{ .Values._internal.lb_service_annotations | default dict | include "preserve-type" }}\'
publishService:
enabled: true
ingressClassResource:
default: true
hostPort:
enabled: false
resources:
requests:
memory: "300Mi"
limits:
memory: "1Gi"
extraArgs:
default-ssl-certificate: "kube-system/default-nginx-tls"
kustomization_spec:
healthChecks:
- apiVersion: apps/v1
kind: DaemonSet
name: rke2-ingress-nginx-controller
namespace: kube-system
first-login-rancher:
info:
description: configures Rancher authentication for the admin user
internal: true
enabled_conditions:
- \'{{ tuple . "rancher" | include "unit-enabled" }}\'
repo: sylva-core
unit_templates:
- base-deps
depends_on:
rancher: true
kustomization_spec:
path: ./kustomize-units/first-login-rancher
postBuild:
substitute:
RANCHER_EXTERNAL_URL: \'{{ .Values.external_hostnames.rancher }}\'
CURRENT_TIME: \'{{ now | date "2006-01-02T15:04:05.999Z" }}\'
wait: true
flux-webui-init:
info:
description: initializes and configures flux-webui
maturity: beta
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "flux-webui" | include "unit-enabled" }}\'
depends_on:
sylva-ca: true
external-secrets-operator: true
ingress-nginx: \'{{ tuple . "ingress-nginx" | include "unit-enabled" }}\'
repo: sylva-core
kustomization_substitute_secrets:
KEY: \'{{ .Values.external_certificates.flux.key | default "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/flux-webui-init
targetNamespace: flux-system
postBuild:
substitute:
SERVICE: flux-webui
SERVICE_DNS: \'{{ .Values.external_hostnames.flux }}\'
CERT: \'{{ .Values.external_certificates.flux.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
_components:
- \'{{ ternary "../tls-components/tls-secret" "../tls-components/tls-certificate" (hasKey .Values.external_certificates.flux "cert") }}\'
- "../tls-components/sylva-ca"
healthChecks:
- apiVersion: v1
kind: Secret
name: flux-webui-tls
namespace: flux-system
flux-webui:
info:
description: installs Weave GitOps Flux web GUI
maturity: stable
unit_templates:
- base-deps
depends_on:
flux-system: true
ingress-nginx: \'{{ tuple . "ingress-nginx" | include "unit-enabled" }}\'
coredns-config: \'{{ tuple . "keycloak" | include "unit-enabled" }}\' # see https://gitlab.com/sylva-projects/sylva-core/-/merge_requests/1023#note_1694289969
keycloak-add-client-scope: \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
keycloak-oidc-external-secrets: \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
flux-webui-init: true
annotations:
sylvactl/readyMessage: "Flux Web UI can be reached at https://{{ .Values.external_hostnames.flux }} ({{ .Values._internal.display_external_ip_msg }})"
repo: weave-gitops
helm_chart_artifact_name: weave-gitops
helmrelease_spec:
chart:
spec:
chart: charts/gitops-server
targetNamespace: flux-system
install:
createNamespace: false
upgrade:
force: true
values:
logLevel: info
envVars:
- name: WEAVE_GITOPS_FEATURE_TENANCY
value: "true"
- name: WEAVE_GITOPS_FEATURE_CLUSTER
value: "false"
- name: WEAVE_GITOPS_FEATURE_OIDC_BUTTON_LABEL
value: "Log in with Keycloak"
installCRDs: true
podSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsGroup: 1000
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
adminUser:
create: true
username: \'{{ .Values.flux_webui.admin_user }}\'
createSecret: false
rbac:
impersonationResourceNames: ["admin", "sylva-admin@example.com"] # the Keycloak username set in unit keycloak-resources; cannot use "infra-admins" group here, see https://gitlab.com/sylva-projects/sylva-core/-/issues/427
additionalRules:
- apiGroups: ["*"]
resources: ["*"]
verbs: [ "get", "list", "watch" ]
ingress:
enabled: true
className: nginx
hosts:
- host: \'{{ .Values.external_hostnames.flux }}\'
paths:
- path: / # setting this to another value like \'/flux-webui\' does not work (URLs coming back from flux webui aren\'t rewritten by nginx)
pathType: Prefix
tls:
- secretName: flux-webui-tls
hosts:
- \'{{ .Values.external_hostnames.flux }}\'
extraVolumes:
- name: custom-ca-cert
secret:
secretName: sylva-ca.crt
items:
- key: ca.crt
path: ca.crt
extraVolumeMounts:
- name: custom-ca-cert
mountPath: /etc/ssl/certs
readOnly: true
oidcSecret:
create: false
_postRenderers:
- kustomize:
patches:
- patch: |-
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: \'{{ .Values.flux_webui.admin_user }}-user-read-resources-cr\'
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: \'{{ .Values.flux_webui.admin_user }}\'
- apiGroup: rbac.authorization.k8s.io
kind: User
name: sylva-admin@example.com # add the same RBAC for the SSO user, so that when the flux-webui SA impersonates it, the SSO user has the needed privileges; cannot use "infra-admins" group here, see https://gitlab.com/sylva-projects/sylva-core/-/issues/427
prometheus-custom-metrics:
info:
description: Prometheus configuration for custom resource metrics
details: adds PodMonitors for Flux controllers and creates custom metrics for various resources by configuring kube-state-metrics
internal: true
unit_templates:
- base-deps
repo: sylva-core
enabled_conditions:
- \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
kustomization_spec:
path: ./kustomize-units/prometheus-custom-metrics
wait: false
force: true
monitoring-crd:
info:
description: installs monitoring stack CRDs
maturity: stable
hidden: true
unit_templates:
- base-deps
depends_on:
# this unit provides the monitoring CRDs which Kyverno unit consumes
# so it cannot depend on kyverno
kyverno: false
kyverno-policies: false
namespace-defs: true
enabled_conditions:
- \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
helm_repo_url: https://charts.rancher.io/
helmrelease_spec:
releaseName: rancher-monitoring-crd
targetNamespace: cattle-monitoring-system
storageNamespace: cattle-monitoring-system # see https://gitlab.com/sylva-projects/sylva-core/-/issues/443
chart:
spec:
chart: rancher-monitoring-crd
version: 106.1.2+up69.8.2-rancher.7
kustomization_spec:
prune: false
grafana-init:
info:
description: sets up Grafana certificate for Keycloak OIDC integration
internal: true
enabled_conditions:
- \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
- \'{{ tuple . "keycloak" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
sylva-ca: true
external-secrets-operator: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/dummy/base
postBuild:
substitute:
SERVICE: grafana
SERVICE_DNS: \'{{ .Values.external_hostnames.grafana }}\'
CERTIFICATE_NAMESPACE: cattle-monitoring-system
CERT: \'{{ .Values.external_certificates.grafana.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
_components:
- \'{{ ternary "../../tls-components/tls-secret" "../../tls-components/tls-certificate" (hasKey .Values.external_certificates.grafana "cert") }}\'
- "../../tls-components/sylva-ca"
healthChecks:
- apiVersion: v1
kind: Secret
name: sylva-ca.crt
namespace: cattle-monitoring-system
- apiVersion: v1
kind: Secret
name: grafana-tls
namespace: cattle-monitoring-system
monitoring:
info:
description: installs monitoring stack
maturity: stable
unit_templates:
- base-deps
depends_on:
namespace-defs: true
monitoring-crd: true
prometheus-resources: \'{{ tuple . "prometheus-resources" | include "unit-enabled" }}\'
annotations:
sylvactl/unitTimeout: "{{ printf \\"%dm\\" (mul 5 .Values.cluster.control_plane_replicas) }}"
helm_repo_url: https://charts.rancher.io/
helmrelease_spec:
driftDetection:
ignore:
- target:
# The Prometheus Operator annotates \'PrometheusRule\' resources with \'prometheus-operator-validated: true\'
# after validation checks. This occurs only at apply time, so drift detection should ignore it.
kind: PrometheusRule
paths:
- /metadata/annotations/prometheus-operator-validated
- target:
# The certgen webhook injects the caBundle at runtime, so drift detection should ignore it.
kind: ValidatingWebhookConfiguration
paths:
- /webhooks[0]/clientConfig/caBundle
- target:
# The certgen webhook injects the caBundle at runtime, so drift detection should ignore it.
kind: MutatingWebhookConfiguration
paths:
- /webhooks[0]/clientConfig/caBundle
releaseName: rancher-monitoring
targetNamespace: cattle-monitoring-system
storageNamespace: cattle-monitoring-system # see https://gitlab.com/sylva-projects/sylva-core/-/issues/443
chart:
spec:
chart: rancher-monitoring
version: 106.1.2+up69.8.2-rancher.7
install:
createNamespace: false
values:
kube-state-metrics:
extraArgs:
- --metric-labels-allowlist=namespaces=[field.cattle.io/projectId]
grafana:
sidecar:
dashboards:
enabled: true
searchNamespace: ALL
multicluster:
global:
enabled: true
etcd:
enabled: true
prometheus-adapter:
enabled: false
prometheus:
prometheusSpec:
scrapeInterval: "60s"
scrapeTimeout: "30s"
evaluationInterval: "30s"
resources:
limits:
memory: 6Gi
requests:
memory: 3Gi
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: \'{{ tuple . (tuple . "two-replicas-storageclass" | include "unit-enabled") "two-replicas-storageclass" .Values._internal.default_storage_class | include "interpret-ternary" }}\'
accessModes: ["ReadWriteOnce"]
resources:
requests:
# default storage is 50Gi, except for CI and dev environments where it is 8Gi
storage: \'{{ has .Values.env_type (list "dev" "ci") | ternary "8Gi" "50Gi" }}\'
externalLabels:
cluster: \'{{ .Values.cluster.name }}\' # required for multi-cluster dashboards
platform_tag: \'{{ .Values.monitoring.platform_tag }}\'
remoteWriteDashboards: true
remoteWrite:
- url: \'{{ .Values.monitoring.thanos.receive_url }}\'
name: \'{{ .Values.cluster.name }}\'
basicAuth:
username:
name: thanos-basic-auth
key: username
password:
name: thanos-basic-auth
key: password
tlsConfig:
insecureSkipVerify: true
queueConfig:
batchSendDeadline: 5s
minBackoff: 1s
maxBackoff: 30s
alertmanager:
alertmanagerSpec:
useExistingSecret: true
configSecret: sylva-alertmanager-config # this Secret is a byproduct of the alertmanager-config unit
replicas: \'{{ .Values._internal.ha_cluster.is_ha | ternary 2 1 }}\'
storage:
volumeClaimTemplate:
spec:
storageClassName: \'{{ tuple . (tuple . "two-replicas-storageclass" | include "unit-enabled") "two-replicas-storageclass" .Values._internal.default_storage_class | include "interpret-ternary" }}\'
accessModes: ["ReadWriteOnce"]
resources:
requests:
# default storage is 2Gi, except for CI and dev environments where it is 1Gi
storage: \'{{ has .Values.env_type (list "dev" "ci") | ternary "1Gi" "2Gi" }}\'
podAntiAffinity: hard # the scheduler is *required* not to schedule two replica pods onto the same node
podDisruptionBudget:
enabled: true
minAvailable: \'{{ .Values._internal.ha_cluster.is_ha | ternary 1 0 }}\'
valuesFrom:
- kind: ConfigMap
name: custom-resource-state-config # this configmap is a byproduct of the prometheus-custom-metrics unit
optional: \'{{ not (tuple . "prometheus-custom-metrics" | include "unit-enabled") | include "as-bool" }}\'
- kind: ConfigMap
name: prometheus-config-values # this configmap is a byproduct of the prometheus-resources unit
optional: \'{{ not (tuple . "prometheus-resources" | include "unit-enabled") | include "as-bool" }}\'
_postRenderers:
- kustomize:
patches:
- \'{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include "preserve-type" }}\'
helm_secret_values:
grafana:
adminPassword: \'{{ .Values._internal.grafana_admin_password }}\'
prometheus:
extraSecret:
name: thanos-basic-auth
data:
username: \'{{ .Values._internal.thanos_user }}\'
password: \'{{ .Values._internal.thanos_password }}\'
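# (the prometheus.extraSecret values above are used by the chart to create the \'thanos-basic-auth\'
# Secret that the remoteWrite basicAuth section earlier in this unit refers to)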
goldpinger-init:
info:
description: sets up Goldpinger prerequisites
details: generates the TLS secret for Goldpinger
internal: true
depends_on:
namespace-defs: true
sylva-ca: true
external-secrets-operator: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "goldpinger" | include "unit-enabled" }}\'
repo: sylva-core
kustomization_substitute_secrets:
KEY: \'{{ .Values.external_certificates.goldpinger.key | default "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/goldpinger-init
wait: true
postBuild:
substitute:
SERVICE: goldpinger
SERVICE_DNS: \'{{ .Values.external_hostnames.goldpinger }}\'
CERTIFICATE_NAMESPACE: goldpinger
CERT: \'{{ .Values.external_certificates.goldpinger.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
_components:
- \'{{ ternary "../tls-components/tls-secret" "../tls-components/tls-certificate" (hasKey .Values.external_certificates.goldpinger "cert") }}\'
- "../tls-components/sylva-ca"
goldpinger:
info:
description: installs Goldpinger for pod-to-pod network observability
maturity: experimental
unit_templates:
- base-deps
enabled: \'{{ has .Values.env_type (list "dev" "ci") }}\'
depends_on:
goldpinger-init: \'{{ tuple . "goldpinger-init" | include "unit-enabled" }}\'
namespace-defs: true
prometheus-resources: \'{{ tuple . "prometheus-resources" | include "unit-enabled" }}\'
openshift-security-context-constraints: \'{{ eq .Values.cluster.capi_providers.bootstrap_provider "cabpoa" }}\'
annotations:
sylvactl/readyMessage: "Goldpinger UI can be reached at https://{{ .Values.external_hostnames.goldpinger }} ({{ .Values._internal.display_external_ip_msg }})"
helm_repo_url: https://bloomberg.github.io/goldpinger/
helmrelease_spec:
releaseName: goldpinger
targetNamespace: goldpinger
storageNamespace: goldpinger
chart:
spec:
chart: goldpinger
version: 1.0.1
install:
createNamespace: true
values:
service:
type: ClusterIP
ingress:
enabled: true
className: nginx
hosts:
- host: "{{ .Values.external_hostnames.goldpinger }}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "{{ .Values.external_hostnames.goldpinger }}"
secretName: goldpinger-tls
rbac:
create: true
extraEnv:
- name: TCP_TARGETS
value: "{{ .Values.cluster_virtual_ip }}:6443"
- name: HTTP_TARGETS
value: "http://goldpinger.goldpinger.svc.cluster.local:8081"
serviceMonitor:
enabled: \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
nodeSelector: {}
tolerations: []
prometheus-resources:
info:
description: Creates required ConfigMaps and Kyverno policies to enable SNMP monitoring by Prometheus
internal: true
unit_templates:
- base-deps
depends_on:
namespace-defs: true
metal3: \'{{ tuple . "metal3" | include "unit-enabled" }}\'
enabled_conditions:
- \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/prometheus-resources
force: true
wait: true
postBuild:
substitute:
CONFIGMAPS: \'
{{- $cm := list -}}
{{- if (tuple . "snmp-exporter" | include "unit-enabled") -}}
{{- $cm = append $cm "prometheus-snmp-sd-files" -}}
{{- $cm = append $cm "prometheus-snmp-sd-files-bmh" -}}
{{- end -}}
{{ $cm | toJson }}\'
TARGETS: >
{{- $sylva_cluster := .Values.cluster.name -}}
{{- $result := list -}}
{{- range .Values.snmp.devices -}}
{{- $target := dict "targets" (list .target) -}}
{{- $_ := set $target "labels" (dict "module" .module "auth" .auth "alias" .alias "cluster_name" (.cluster_name | default $sylva_cluster)) -}}
{{- $result = append $result $target -}}
{{- end -}}
{{ $result | toJson }}
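# (TARGETS renders as a Prometheus file_sd-style JSON list; for a hypothetical device entry with
# target 192.0.2.20, module "if_mib", auth "public_v2" and alias "switch1", it would contain
# {"labels": {"alias": "switch1", "auth": "public_v2", "cluster_name": "<cluster name>", "module": "if_mib"}, "targets": ["192.0.2.20"]})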
_components:
- \'{{ tuple "components/kyverno-snmp-bmh-policy" (tuple . "metal3" | include "unit-enabled") | include "set-only-if" }}\'
alertmanager-jiralert:
info:
description: installs Alertmanager webhook Jiralert
details: Jiralert is an Alertmanager webhook that creates Jira issues
maturity: beta
enabled_conditions:
- \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
- \'{{ .Values.monitoring.alertmanager.webhooks.jiralert.config | empty | not }}\'
unit_templates:
- base-deps
depends_on:
alertmanager-jiralert-config: true
helm_repo_url: https://prometheus-community.github.io/helm-charts
helmrelease_spec:
targetNamespace: cattle-monitoring-system
chart:
spec:
chart: jiralert
version: 1.8.1
values:
extraArgs:
- -log.level=info
- -hash-jira-label
existingConfigSecret: sylva-alertmanager-webhook-jiralert # this Secret is a byproduct of the alertmanager-jiralert-config unit
fullnameOverride: \'alertmanager-jiralert\'
podAnnotations:
sylva/jiralert-config: \'{{ .Values.monitoring.alertmanager.webhooks.jiralert.config | toJson | sha256sum | trunc 8 }}\'
env:
http_proxy: \'{{ .Values.monitoring.alertmanager.webhooks.jiralert.env.http_proxy | default .Values.proxies.http_proxy }}\'
https_proxy: \'{{ .Values.monitoring.alertmanager.webhooks.jiralert.env.https_proxy | default .Values.proxies.https_proxy }}\'
no_proxy: \'{{ .Values.monitoring.alertmanager.webhooks.jiralert.env.no_proxy | default (include "sylva-units.no_proxy" (tuple .)) }}\'
replicaCount: \'{{ .Values._internal.ha_cluster.is_ha | ternary 2 1 | include "preserve-type" }}\'
podDisruptionBudget:
minAvailable: \'{{ .Values._internal.ha_cluster.is_ha | ternary 1 0 | include "preserve-type" }}\'
unhealthyPodEvictionPolicy: AlwaysAllow
securityContext:
runAsNonRoot: true
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
alertmanager-jiralert-config:
info:
description: generates the config for Jiralert Alertmanager webhook
maturity: beta
enabled_conditions:
- \'{{ tuple . "alertmanager-jiralert" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
repo: sylva-alertmanager-resources
helmrelease_spec:
chart:
spec:
chart: webhooks/jiralert
targetNamespace: cattle-monitoring-system
helm_secret_values:
config: \'{{ .Values.monitoring.alertmanager.webhooks.jiralert.config | include "preserve-type" }}\'
alertmanager-snmp-notifier:
info:
description: installs Alertmanager webhook snmp-notifier
details: snmp-notifier is an Alertmanager webhook that sends alerts as SNMP traps
maturity: beta
enabled_conditions:
- \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
# only enable if SNMP traps destinations are set
- \'{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.destinations | empty | not }}\'
unit_templates:
- base-deps
helm_repo_url: https://prometheus-community.github.io/helm-charts
helmrelease_spec:
targetNamespace: cattle-monitoring-system
chart:
spec:
chart: alertmanager-snmp-notifier
version: 2.1.0
values:
snmpNotifier:
extraArgs:
\'{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.final_extra_args }}\'
snmpDestinations:
\'{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.destinations | default list | include "preserve-type" }}\'
fullnameOverride: \'alertmanager-snmp-notifier\'
extraConfigmapMounts:
- name: snmp-notifier-templates
configMap: sylva-alertmanager-webhook-snmp-notifier # this ConfigMap is a byproduct of the alertmanager-snmp-notifier-config unit
mountPath: /etc/snmp_notifier/
readOnly: true
podAnnotations:
sylva/snmp-notifier-config: \'{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config | toJson | sha256sum | trunc 8 }}\'
replicaCount: \'{{ .Values._internal.ha_cluster.is_ha | ternary 2 1 | include "preserve-type" }}\'
podDisruptionBudget:
minAvailable: \'{{ .Values._internal.ha_cluster.is_ha | ternary 1 0 | include "preserve-type" }}\'
unhealthyPodEvictionPolicy: AlwaysAllow
securityContext:
runAsNonRoot: true
runAsUser: 65535
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
resources:
limits:
cpu: "0.2"
memory: "128Mi"
requests:
cpu: "0.1"
memory: "64Mi"
helm_secret_values:
snmpNotifier:
snmpCommunity: "{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.auth.community }}"
snmpAuthenticationUsername: "{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.auth.username }}"
snmpAuthenticationPassword: "{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.auth.password }}"
snmpPrivatePassword: "{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.auth.priv_password }}"
alertmanager-snmp-notifier-config:
info:
description: generates the config for snmp-notifier Alertmanager webhook
maturity: beta
enabled_conditions:
- \'{{ tuple . "alertmanager-snmp-notifier" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
repo: sylva-alertmanager-resources
helmrelease_spec:
chart:
spec:
chart: webhooks/snmp-notifier
targetNamespace: cattle-monitoring-system
alertmanager-config:
info:
description: generates the config for Alertmanager
maturity: beta
enabled_conditions:
- \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
- \'{{ .Values.units.monitoring.helmrelease_spec.values.alertmanager.alertmanagerSpec.useExistingSecret }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
repo: sylva-alertmanager-resources
helm_chart_artifact_name: alertmanager-config
helmrelease_spec:
chart:
spec:
chart: config
targetNamespace: cattle-monitoring-system
helm_secret_values:
config: \'{{ .Values.monitoring.alertmanager.config | include "preserve-type" }}\'
prometheus-pushgateway:
info:
description: installs the Prometheus Pushgateway exporter
maturity: beta
enabled_conditions:
- \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
monitoring: true
helm_repo_url: https://prometheus-community.github.io/helm-charts
helmrelease_spec:
releaseName: prometheus-pushgateway
targetNamespace: pushgateway
storageNamespace: pushgateway
chart:
spec:
chart: prometheus-pushgateway
version: 3.4.1
install:
createNamespace: true
values:
persistentVolume:
enabled: true
securityContext:
runAsNonRoot: true
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
extraArgs:
- --persistence.file=/data/pushgateway.data
- --persistence.interval=5m
extraContainers:
- name: prometheus-pushgateway-metrics-purger
image: curlimages/curl:8.15.0
command:
- sh
- -c
- |
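# (descriptive note: this sidecar loops forever; every hour it lists all pushgateway
# job groups via their push_time_seconds metric and deletes any group whose last
# push is older than 180 seconds)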
while true
do
del_req="curl -X DELETE http://localhost:9091/metrics/job/"
curl -s http://localhost:9091/metrics | \\
grep push_time_seconds | \\
grep -Ev \'^#\' | \\
while read line
do
last_pushed=$(printf "%.f" `echo $line | awk \'{print $2}\'`)
job_name=$(echo $line | \\
awk -F \'}\' \'{print $1}\' | \\
grep -o \'job=.*\' | \\
cut -f1 -d \',\' | \\
cut -f2 -d\'=\' | \\
tr -d \'"\')
std_unix_time_now=$(date +%s)
interval_seconds=$((std_unix_time_now - last_pushed))
[ $interval_seconds -gt 180 ] \\
&& eval $del_req$job_name && echo "$(date), Deleted job group - $job_name" \\
|| echo "$(date), Purge action skipped. Interval not satisfied" # adjust interval_seconds as per requirement
done
sleep 3600
done
securityContext:
allowPrivilegeEscalation: false
runAsNonRoot: true
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
ephemeral-storage-exporter:
info:
description: installs an exporter for extracting ephemeral storage metrics from a Kubernetes cluster
maturity: beta
enabled_conditions:
- \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
monitoring: true
helm_repo_url: https://jmcgrath207.github.io/k8s-ephemeral-storage-metrics/chart
helmrelease_spec:
releaseName: ephemeral-storage-exporter
targetNamespace: cattle-monitoring-system
chart:
spec:
chart: k8s-ephemeral-storage-metrics
version: 1.18.2
values:
fullnameOverride: \'ephemeral-storage-exporter\'
replicas: 1
log_level: info
serviceMonitor:
enable: true
prometheus:
enable: true
securityContext:
runAsNonRoot: true
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
kepler:
info:
description: installs Kepler (Kubernetes-based Efficient Power Level Exporter) exporter for Prometheus
maturity: stable
enabled: false
unit_templates:
- base-deps
depends_on:
monitoring-crd: true
helm_repo_url: https://sustainable-computing-io.github.io/kepler-helm-chart
helmrelease_spec:
releaseName: kepler
targetNamespace: kepler
chart:
spec:
chart: kepler
version: 0.6.1
install:
createNamespace: false
values:
extraEnvVars:
# set the Prometheus scrape interval for the kepler ServiceMonitor
PROMETHEUS_SCRAPE_INTERVAL: 30s
serviceMonitor:
enabled: \'{{ .Values._internal.monitoring.enabled | include "preserve-type" }}\'
labels:
release: rancher-monitoring
redfish:
enabled: false
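# Illustrative note: kepler ships disabled; enabling it from a user values file
# is a one-line override (sketch):
#   units:
#     kepler:
#       enabled: true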
snmp-exporter:
info:
description: installs SNMP exporter
maturity: beta
enabled_conditions:
- \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
- \'{{ .Values.snmp.auth | empty | not }}\'
unit_templates:
- base-deps
depends_on:
monitoring: true
snmp-exporter-config: true
helm_repo_url: https://prometheus-community.github.io/helm-charts
helmrelease_spec:
releaseName: snmp-exporter
targetNamespace: snmp-exporter
chart:
spec:
chart: prometheus-snmp-exporter
version: 9.6.2
values:
fullnameOverride: \'snmp-exporter\'
replicas: 1
extraArgs:
- "--config.file=/config/snmp.yaml"
podAnnotations:
sylva/snmp-values: \'{{ .Values.snmp | toJson | sha256sum | trunc 8 }}\'
serviceMonitor:
enabled: false
securityContext:
runAsNonRoot: true
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
extraConfigmapMounts: # this ConfigMap is a byproduct of the snmp-exporter-config unit
- name: snmp-config
mountPath: /config
configMap: snmp-exporter-config
snmp-exporter-config:
info:
description: contains OID files and generates configuration needed by the snmp-exporter
maturity: beta
enabled_conditions:
- \'{{ tuple . "snmp-exporter" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
repo: sylva-snmp-resources
helm_chart_artifact_name: sylva-snmp-resources
helmrelease_spec:
chart:
spec:
chart: .
targetNamespace: snmp-exporter
install:
createNamespace: true
helm_secret_values:
auth: \'{{ .Values.snmp.auth | include "preserve-type" }}\'
sylva-dashboards:
info:
description: adds Sylva-specific Grafana dashboards
maturity: beta
enabled_conditions:
- \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
monitoring: true
repo: sylva-dashboards
helm_chart_artifact_name: sylva-dashboards
helmrelease_spec:
chart:
spec:
chart: .
targetNamespace: sylva-dashboards
install:
createNamespace: true
values:
namespace: sylva-dashboards
optional_dashboards: \'{{ .Values._internal.monitoring.conditionals | include "preserve-type" }}\'
multus-init:
info:
description: reconfigure Calico to prevent it from installing some CNI binaries concurrently with Multus
internal: true
enabled_conditions:
- \'{{ tuple . "kyverno" | include "unit-enabled" }}\'
- \'{{ tuple . "calico" | include "unit-enabled" }}\'
- \'{{ tuple . "multus" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
kyverno: true
calico: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kyverno-policies/skip-calico-cni
wait: true
multus:
info:
description: installs Multus
maturity: stable
enabled: false
unit_templates:
- base-deps
depends_on:
\'{{ .Values._internal.calico_readiness_unit }}\': \'{{ tuple . "calico" | include "unit-enabled" }}\'
multus-init: \'{{ tuple . "multus-init" | include "unit-enabled" }}\'
kustomization_spec:
prune: false # This prevents the deletion of Multus resources when the unit is disabled after an initial deployment
helm_repo_url: https://rke2-charts.rancher.io/
helm_chart_versions:
v4.2.106: \'{{ include "k8s-version-match" (tuple ">=1.30.0,<1.31.0" .Values._internal.k8s_version) }}\'
v4.2.205: \'{{ include "k8s-version-match" (tuple ">=1.31.0" .Values._internal.k8s_version) }}\'
helmrelease_spec:
chart:
spec:
chart: rke2-multus
version: "" # will be defined by helm_chart_versions
targetNamespace: kube-system
install:
createNamespace: false
values:
rke2-whereabouts:
enabled: true
tolerations: []
_postRenderers:
- kustomize:
patches:
- patch: |-
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: multus-rke2-whereabouts
namespace: kube-system
labels:
$patch: delete
app: whereabouts
- patch: |-
kind: ServiceAccount
apiVersion: v1
metadata:
name: multus-rke2-whereabouts
namespace: kube-system
labels:
$patch: delete
app: whereabouts
- patch: |-
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: multus
namespace: kube-system
spec:
template:
metadata:
annotations:
sylva/force-reinstall-cni: done
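# Illustrative note: Multus is disabled by default; it can be enabled the same
# way from a user values file (sketch):
#   units:
#     multus:
#       enabled: true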
multus-cleanup:
info:
description: periodically cleans the Multus cache
details: >
Multus does not clean up /var/lib/cni/multus automatically,
leading to inode starvation on the host file-system.
We need to handle the cleanup ourselves for now.
internal: true
enabled_conditions:
- \'{{ tuple . "multus" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
multus: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/multus-cleanup
targetNamespace: kube-system
wait: true
multus-ready:
info:
description: checks that Multus is ready
details: >
This unit only has dependencies; it does not create resources.
It performs health checks outside of the multus unit,
in order to properly target the workload cluster when we deploy Multus in it.
internal: true
unit_templates:
- base-deps
- dummy
enabled_conditions:
- \'{{ tuple . "multus" | include "unit-enabled" }}\'
depends_on:
multus: true
kustomization_spec:
healthChecks:
- apiVersion: apps/v1
kind: DaemonSet
name: multus
namespace: kube-system
- apiVersion: apps/v1
kind: DaemonSet
name: multus-rke2-whereabouts
namespace: kube-system
multus-uninstall-cleanup:
info:
description: deletes Multus resources that are not cleaned up by the uninstall process
details: >
When Multus is uninstalled, some resources remain on the node.
This unit cleans up those resources to ensure a clean state.
internal: true
enabled_conditions:
- \'{{ .Values._internal.state.is_multus_uninstall }}\'
unit_templates:
- base-deps
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/multus-uninstall-cleanup
wait: true
multus-helmrelease-cleanup:
info:
description: removes the Multus HelmRelease resource when the unit is uninstalled
internal: true
unit_templates:
- base-deps
- kube-job
enabled_conditions:
- \'{{ tuple . "multus-uninstall-cleanup" | include "unit-enabled" }}\'
depends_on:
multus-uninstall-cleanup: true
kustomization_spec:
targetNamespace: \'{{ .Release.Namespace }}\'
_patches:
- \'{{ include "kube-job-add-env-var-patch" (dict "RELEASE_NAMESPACE" .Release.Namespace )}}\'
- \'{{ include "kube-job-replace-script-patch" (.Files.Get "scripts/multus-delete-helmrelease.sh") }}\'
sriov-crd:
info:
description: installs CRDs for SR-IOV Network operator
maturity: stable
hidden: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "sriov-network-operator" | include "unit-enabled" }}\'
depends_on:
namespace-defs: true
multus-ready: true
helm_repo_url: https://suse-edge.github.io/charts/
helmrelease_spec:
releaseName: sriov-crd
targetNamespace: cattle-sriov-system
install:
createNamespace: false
chart:
spec:
chart: sriov-crd
version: 1.5.2+up1.5.0
kustomization_spec:
prune: false
sriov-network-operator:
info:
description: installs SR-IOV operator
maturity: stable
unit_templates:
- base-deps
enabled: \'{{ or (tuple . "sriov" | include "unit-enabled") (not (empty .Values.sriov.node_policies)) }}\' # for backward compatibility
depends_on:
namespace-defs: true
sriov-crd: true
kyverno: true
helm_repo_url: https://suse-edge.github.io/charts/
helmrelease_spec:
releaseName: sriov
targetNamespace: cattle-sriov-system
chart:
spec:
chart: sriov-network-operator
version: 1.5.2+up1.5.0
values:
operator:
admissionControllers:
enabled: true
certificates:
secretNames:
operator: "operator-webhook-cert"
injector: "network-resources-injector-cert"
certManager:
enabled: true
generateSelfSigned: true
kustomization_spec:
_components:
- ../kyverno-policies/sriov-network-operator
sriov:
info:
description: obsolete - replaced by sriov-network-operator
details: dummy unit which only enables sriov-network-operator for backward compatibility
internal: true
enabled: false
unit_templates:
- dummy
sriov-resources:
info:
description: configures SR-IOV resources
internal: true
enabled_conditions:
- \'{{ not (empty .Values.sriov.node_policies) }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
sriov-network-operator: true
repo: sriov-resources
helm_chart_artifact_name: sriov-resources
helmrelease_spec:
chart:
spec:
chart: .
targetNamespace: cattle-sriov-system
values:
node_policies: \'{{ .Values.sriov.node_policies | include "preserve-type" }}\'
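# Illustrative note: both sriov-network-operator and sriov-resources key off
# .Values.sriov.node_policies: any non-empty value enables them, and the
# content is passed as-is to the sriov-resources chart, which defines its
# schema. Placeholder-only sketch (the policy name and empty body are
# hypothetical):
#   sriov:
#     node_policies:
#       example-policy: {}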
rke2-calico-coredns-cleanup:
info:
description: remove rke2-coredns and rke2-calico-crd HelmChart resources
internal: true
one_shot: true
unit_templates:
- base-deps
- kube-job
enabled_conditions:
- \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" }}\'
- \'{{ .Values._internal.state.is_upgrade }}\'
kustomization_spec:
targetNamespace: kube-system
_patches:
- \'{{ include "kube-job-replace-script-patch" (.Files.Get "scripts/rke2-calico-coredns-cleanup.sh") }}\'
coredns:
info:
description: installs rke2-coredns
labels:
sylva-units/protected: ""
unit_templates:
- base-deps
enabled_conditions:
- \'{{ or (.Values.cluster.capi_providers.bootstrap_provider | eq "cabpk")
(.Values.cluster.capi_providers.bootstrap_provider | eq "cabpr")
}}\'
# with kubeadm, this unit is just a plain dummy unit that is here only to ensure
# that on upgrade from Sylva 1.3 the \'coredns\' Kustomization (functionally replaced
# by the \'coredns-config\' Kustomization) is patched with "prune: false" instead
# of being deleted
depends_on:
coredns-config: \'{{ tuple . "coredns-config" | include "unit-enabled" }}\'
rke2-calico-coredns-cleanup: \'{{ tuple . "rke2-calico-coredns-cleanup" | include "unit-enabled" }}\'
kustomization_spec:
prune: \'{{ tuple false ( .Values.cluster.capi_providers.bootstrap_provider | eq "cabpk") | include "set-only-if" }}\'
helm_repo_url: https://rke2-charts.rancher.io
helm_chart_versions:
1.42.302: >-
{{ include "k8s-version-match" (tuple ">=1.30.0,<1.31.0" .Values._internal.k8s_version) }}
1.43.302: >-
{{ include "k8s-version-match" (tuple ">=1.31.0" .Values._internal.k8s_version) }}
helmrelease_spec:
suspend: \'{{ tuple true ( .Values.cluster.capi_providers.bootstrap_provider | eq "cabpk") | include "set-only-if" }}\'
driftDetection:
ignore:
- target:
# deleting the configmap is a desired action, in this way we reconfigure coredns;
# in the case of OKD deployments, drift detection should ignore it
kind: Deployment
name: rke2-coredns-rke2-coredns
paths:
- /spec/template/spec/volumes/0/configMap/items
- target:
kind: Service
name: rke2-coredns-rke2-coredns
paths:
- /spec/ports
releaseName: rke2-coredns
targetNamespace: kube-system
chart:
spec:
chart: rke2-coredns
version: "" # will be defined by helm_chart_versions
# by adding option "skipConfig: true" into values, the chart will not
# create/control the configMap responsible with the coredns configuration
# and let coredns-config unit to manage it
values: >-
{{ $value := (dict "deployment" (dict "skipConfig" "true")) }}
{{ tuple $value .Values.cluster.coredns.helm_values | include "merge-append" }}
{{ $value | include "preserve-type" }}
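# Illustrative note: the values block above merges
# .Values.cluster.coredns.helm_values on top of the forced skipConfig setting,
# so additional rke2-coredns chart values can be injected from user values
# (sketch, the keys under helm_values are hypothetical examples):
#   cluster:
#     coredns:
#       helm_values:
#         resources:
#           limits:
#             memory: 256Mi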
coredns-config:
info:
description: configures DNS inside cluster
internal: true
unit_templates:
- base-deps
depends_on:
rke2-calico-coredns-cleanup: \'{{ tuple . "rke2-calico-coredns-cleanup" | include "unit-enabled" }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/coredns
wait: true
_patches:
- target:
kind: ConfigMap
name: coredns
patch: |
- op: replace
path: /metadata/name
value: >-
{{- if eq .Values.cluster.capi_providers.bootstrap_provider "cabpk" }}
coredns
{{- else if eq .Values.cluster.capi_providers.bootstrap_provider "cabpck" }}
ck-dns-coredns
{{- else }}
rke2-coredns-rke2-coredns
{{- end }}
postBuild:
substitute:
CLUSTER_VIRTUAL_IP: \'{{ .Values.cluster_virtual_ip }}\'
CLUSTER_DOMAIN: \'{{ .Values.cluster_domain }}\'
nfs-ganesha-init:
info:
description: Defines the persistent volume claim for NFS Ganesha
maturity: experimental
internal: true
unit_templates:
- base-deps
depends_on:
namespace-defs: true
\'{{ .Values._internal.default_storage_class_unit }}\': true
enabled_conditions:
- \'{{ tuple . "nfs-ganesha" | include "unit-enabled" }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/nfs-ganesha-init
targetNamespace: nfs-ganesha
wait: true
postBuild:
substitute:
storage_size: 5Gi
storage_class: \'{{ .Values._internal.default_storage_class }}\'
nfs-ganesha:
info:
description: manages NFS Ganesha CSI provisioner
maturity: experimental
enabled: false
unit_templates:
- base-deps
depends_on:
namespace-defs: true
nfs-ganesha-init: true
helm_repo_url: https://kubernetes-sigs.github.io/nfs-ganesha-server-and-external-provisioner/
helmrelease_spec:
targetNamespace: nfs-ganesha
chart:
spec:
chart: nfs-server-provisioner
version: 1.8.0
values:
replicaCount: 1
extraArgs:
device-based-fsids: false
persistence:
enabled: true
existingClaim: nfs-ganesha-data-pvc
storageClass:
name: nfs-ganesha
allowVolumeExpansion: true
mountOptions:
- vers=4.1
- retrans=2
- timeo=30
image:
repository: registry.gitlab.com/sylva-projects/sylva-elements/container-images/sandbox-registry/nfs-provisioner
tag: 6.5.custom # built based on https://github.com/kubernetes-sigs/nfs-ganesha-server-and-external-provisioner/pull/152 and embedding https://github.com/nfs-ganesha/ntirpc/tree/a392d47b26e216cbfcc362ed146c94b98894394a (fix from https://github.com/nfs-ganesha/ntirpc/pull/318)
_postRenderers:
- kustomize:
patches:
- target:
kind: StatefulSet
patch: |-
- op: add
path: /spec/template/spec/containers/0/livenessProbe
value:
exec:
command:
- sh
- -c
- rpcinfo -t localhost nfs || exit 1
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
- target:
kind: StatefulSet
patch: |-
- op: add
path: /spec/template/spec/containers/0/readinessProbe
value:
exec:
command:
- sh
- -c
- rpcinfo -t localhost nfs || exit 1
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 2
- target:
kind: StatefulSet
patch: |-
- op: add
path: /spec/template/spec/volumes/-
value:
name: vfs-template
configMap:
name: ganesha-vfs-template
- op: add
path: /spec/template/spec/volumes/-
value:
name: vfs-conf-update
configMap:
name: vfs-conf-update
defaultMode: 0644
- op: add
path: /spec/template/spec/initContainers
value:
- name: vfs-conf-update
image: busybox:1.36.0
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 0
runAsGroup: 0
command:
- /bin/sh
- /etc/ganesha/scripts/vfs-conf-update.sh
volumeMounts:
- name: data
mountPath: /export
- name: vfs-template
mountPath: /etc/ganesha
readOnly: true
- name: vfs-conf-update
mountPath: /etc/ganesha/scripts
readOnly: true
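# Illustrative note: NFS Ganesha is experimental and disabled by default; it
# can be enabled from a user values file (sketch):
#   units:
#     nfs-ganesha:
#       enabled: true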
test-nfs-ganesha:
enabled: \'{{ .Values.env_type | eq "ci" }}\'
info:
description: Performs testing of RWX-enabled PVs created from NFS Ganesha
internal: true
test: true
unit_templates:
- base-deps
depends_on:
namespace-defs: true
nfs-ganesha: true
enabled_conditions:
- \'{{ tuple . "nfs-ganesha" | include "unit-enabled" }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/test-nfs-ganesha
targetNamespace: nfs-ganesha
wait: true
force: true
ceph-csi-cephfs:
info:
description: Installs Ceph-CSI
maturity: beta
enabled: false
unit_templates:
- base-deps
depends_on:
namespace-defs: true
openshift-security-context-constraints: \'{{ eq .Values.cluster.capi_providers.bootstrap_provider "cabpoa" }}\'
helm_repo_url: https://ceph.github.io/csi-charts
helmrelease_spec:
targetNamespace: ceph-csi-cephfs
chart:
spec:
chart: ceph-csi-cephfs
version: 3.14.0
values:
provisioner:
replicaCount: \'{{ .Values.cluster.control_plane_replicas }}\'
storageClass:
create: true
name: cephfs-csi
clusterID: \'{{ .Values.ceph.cephfs_csi.clusterID }}\'
fsName: \'{{ .Values.ceph.cephfs_csi.fs_name }}\'
annotations:
storageclass.kubernetes.io/is-default-class: \'{{ .Values._internal.default_storage_class | eq "cephfs-csi" }}\'
csiConfig:
- clusterID: \'{{ .Values.ceph.cephfs_csi.clusterID }}\'
monitors: \'{{ .Values.ceph.cephfs_csi.monitors_ips | include "preserve-type" }}\'
helm_secret_values:
secret:
create: true
userID: \'{{ .Values.ceph.cephfs_csi.adminID }}\'
userKey: \'{{ .Values.ceph.cephfs_csi.adminKey }}\'
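# Illustrative note: enabling ceph-csi-cephfs requires the Ceph connection
# details under .Values.ceph.cephfs_csi, all of which are consumed above
# (sketch, every value is a placeholder):
#   units:
#     ceph-csi-cephfs:
#       enabled: true
#   ceph:
#     cephfs_csi:
#       clusterID: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#       fs_name: cephfs
#       monitors_ips:
#         - 192.0.2.10:6789
#       adminID: admin
#       adminKey: replace-me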
longhorn-crd:
info:
description: installs Longhorn CRDs
maturity: stable
hidden: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "longhorn" | include "unit-enabled" }}\'
depends_on:
namespace-defs: true
helm_repo_url: https://charts.rancher.io/
helmrelease_spec:
releaseName: longhorn-crd
targetNamespace: longhorn-system
chart:
spec:
chart: longhorn-crd
version: 106.2.1+up1.8.2
kustomization_spec:
prune: false
longhorn:
info:
description: installs Longhorn CSI
maturity: stable
enabled_conditions:
- \'{{ eq .Values.cluster.capi_providers.infra_provider "capm3" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
longhorn-crd: true
monitoring-crd: \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
# Ensure that all volumes are in a healthy state prior to upgrading longhorn
longhorn-volumes-healthy: \'{{ .Values._internal.state.is_upgrade }}\'
# Ensure that longhorn is upgraded after the cluster. This should be inherited from base-deps,
# but we add it as a safeguard, since some unit could add an indirect dependency from cluster to longhorn.
# The following dependency prevents that, as it would create a loop in such cases.
cluster-machines-ready: \'{{ .Values._internal.state.is_upgrade }}\'
helm_repo_url: https://charts.rancher.io/
helmrelease_spec:
releaseName: longhorn
targetNamespace: longhorn-system
install:
createNamespace: false
chart:
spec:
chart: longhorn
version: 106.2.1+up1.8.2
values:
metrics:
serviceMonitor:
enabled: \'{{ .Values._internal.monitoring.enabled | include "preserve-type" }}\'
defaultSettings:
createDefaultDiskLabeledNodes: true
allowVolumeCreationWithDegradedAvailability: false
storageMinimalAvailablePercentage: 10
replicaReplenishmentWaitInterval: \'{{ .Values.cluster.capi_providers.infra_provider | eq "capm3" | ternary 3600 600 | include "preserve-type" }}\'
nodeDrainPolicy: block-for-eviction-if-contains-last-replica
concurrentAutomaticEngineUpgradePerNodeLimit: 3
storageReservedPercentageForDefaultDisk: 0 # Percentage of each default disk to reserve (not used for volume scheduling).
# Intended to prevent overfilling OS-shared disks.
# Set to 0 to make full disk space schedulable.
persistence:
defaultClass: \'{{ .Values._internal.default_storage_class | eq "longhorn" | include "preserve-type" }}\'
longhorn-volumes-healthy:
info:
description: wait for all longhorn volumes to be in a healthy state (attached/healthy or detached/unknown)
internal: true
annotations:
sylvactl/unitTimeout: 30m
enabled_conditions:
- \'{{ tuple . "longhorn" | include "unit-enabled" }}\'
- \'{{ .Values._internal.state.is_upgrade }}\'
depends_on:
cluster-machines-ready: \'{{ .Values._internal.state.is_upgrade }}\'
unit_templates:
- base-deps
- kube-job
repo: sylva-core
kustomization_spec:
targetNamespace: longhorn-system
_patches:
- \'{{ include "kube-job-replace-script-patch" (.Files.Get "scripts/longhorn-volumes-healthy.sh") }}\'
longhorn-engine-image-cleanup:
info:
description: Kyverno cleanup policy to delete old Longhorn EngineImage resources that are left over after upgrade
internal: true
enabled_conditions:
- \'{{ tuple . "longhorn" | include "unit-enabled" }}\'
- \'{{ tuple . "kyverno" | include "unit-enabled" }}\'
depends_on:
longhorn: true
kyverno: true
unit_templates:
- base-deps
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kyverno-policies/longhorn-engine-image-cleanup
longhorn-update-stale-replica-timeout:
info:
description: Kyverno policy to set the staleReplicaTimeout value to 60 for all Longhorn volumes
internal: true
enabled_conditions:
- \'{{ tuple . "longhorn" | include "unit-enabled" }}\'
depends_on:
longhorn: \'{{ not .Values._internal.state.is_upgrade }}\'
kyverno: true
unit_templates:
- base-deps
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kyverno-policies/update-stalereplicatimeout
longhorn-instance-manager-cleanup:
info:
description: cronjob to clean up Longhorn instance-manager pods that are preventing node drain
internal: true
enabled_conditions:
- \'{{ tuple . "longhorn" | include "unit-enabled" }}\'
unit_templates:
- base-deps
- kube-cronjob
kustomization_spec:
postBuild:
substitute:
NAMESPACE: longhorn-system
_patches:
- target:
kind: CronJob
patch: |
- op: replace
path: /spec/schedule
value: "*/5 * * * *" # Every 5 minutes
- target:
kind: ClusterRole
patch: |
- op: replace
path: /rules
value:
- apiGroups: [""]
resources: [nodes]
verbs: [list, get, patch]
- \'{{ include "kube-cronjob-replace-script-patch" (.Files.Get "scripts/longhorn-instance-manager-cleanup.sh") }}\'
rancher-default-roles:
info:
description: Create Rancher role templates
details: |
This unit creates a set of additional role templates which are likely to be needed by many
clusters.
internal: true
enabled_conditions:
- \'{{ tuple . "rancher" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
rancher: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/rancher-default-roles
os-images-info:
info:
description: Creates a list of OS images
details: |
This unit creates a configmap containing information on operating system images for use with CAPO and CAPM3.
internal: true
enabled_conditions:
- \'{{ or (tuple . "capm3" | include "unit-enabled")
(.Values.cluster.capi_providers.infra_provider | eq "capo")
(.Values.cluster.capi_providers.infra_provider | eq "capm3")
}}\'
unit_templates:
- base-deps
- kube-job
kustomization_spec:
_components:
- \'{{ tuple "./components/certs" (not .Values.oci_registry_insecure) | include "set-only-if" }}\'
postBuild:
substitute:
JOB_CHECKSUM: |-
{{- tuple . "no_proxy_additional" | include "interpret" -}}
{{
tuple . (list
.Values._internal.sylva_core_version
(include "generate-os-images" .)
.Values.proxies
.Values.no_proxy_additional
.Values.oci_registry_insecure
) | include "interpret-and-hash"
}}
EXTRA_CA_CERTS: \'{{ tuple (.Values.oci_registry_extra_ca_certs | default "" | b64enc) (not .Values.oci_registry_insecure) | include "set-only-if" }}\'
_patches:
- \'{{ include "kube-job-replace-image-patch" .Values._internal.oci_tools_image }}\'
- \'{{ include "kube-job-add-env-var-patch" (dict
"https_proxy" .Values.mgmt_cluster_state_values.proxies.https_proxy
"no_proxy" .Values.mgmt_cluster_state_values.proxies.no_proxy
"oci_registry_insecure" .Values.oci_registry_insecure
"SKIP_IMAGE_VERIFICATION" .Values.security.os_images.skip_signing_check
)}}\'
- \'{{ include "kube-job-replace-script-patch" (.Files.Get "scripts/create-os-images-info.sh") }}\'
- \'{{ include "kube-job-add-files-to-configmap-patch" (dict "images.yaml" (include "generate-os-images" .)) }}\'
wait: false
healthChecks:
- apiVersion: batch/v1
kind: Job
name: os-images-info
namespace: \'{{ .Release.Namespace }}\'
- apiVersion: v1
kind: ConfigMap
name: os-images-info
namespace: \'{{ .Release.Namespace }}\'
os-image-server:
info:
description: >
Deploys a web server on the management cluster
which serves OS images for baremetal clusters.
maturity: stable
enabled_conditions:
- \'{{ tuple . "metal3" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
ingress-nginx: \'{{ tuple . "ingress-nginx" | include "unit-enabled" }}\'
\'{{ .Values._internal.default_storage_class_unit }}\': \'{{ not .Values._internal.state.is_upgrade }}\'
os-images-info: true
repo: os-image-server
annotations:
sylvactl/readyMessage: |
{{- $osImageFqdn := (coalesce .Values.external_hostnames.os_image_server .Values.display_external_ip) -}}
OS images are served at URLs like: https://{{ $osImageFqdn }}/<filename>(.sha256sum)
{{- if not (eq $osImageFqdn .Values.display_external_ip)}}
({{ .Values.external_hostnames.os_image_server }} must resolve to {{ .Values.display_external_ip }})
{{- end }}
sylvactl/unitTimeout: 30m
helmrelease_spec:
chart:
spec:
chart: ./charts/os-image-server
targetNamespace: os-images
install:
createNamespace: true
timeout: 168h # leave plenty of time to download OS images in initContainers
values:
outputConfigMap:
namespace: sylva-system
name: capm3-os-image-server-os-images
downloader:
proxy: \'{{ get .Values.proxies "https_proxy" }}\'
no_proxy: \'{{ include "sylva-units.no_proxy" (tuple .) }}\'
extra_ca_certs: \'{{ .Values.oci_registry_extra_ca_certs | include "set-if-defined" }}\'
ingress:
className: nginx
hosts:
- host: \'{{ .Values.external_hostnames.os_image_server }}\'
osImagePersistenceDefaults:
enabled: true
size: \'{{ .Values.os_images_default_download_storage_space }}\'
storageClass: \'{{ .Values._internal.default_storage_class }}\'
os_image_selectors: >-
{{- tuple . "cluster" | include "interpret" -}}
{{
concat (tuple .Values.cluster | include "find-cluster-image-selectors" | fromJsonArray)
(.Values.os_image_server_additional_selectors | values)
| include "preserve-type"
}}
valuesFrom:
- kind: ConfigMap
name: os-images-info # this configmap is a byproduct of the os-images-info unit
capo-contrail-bgpaas:
info:
description: installs CAPO Contrail BGPaaS controller
maturity: stable
enabled: false
repo: capo-contrail-bgpaas
helm_chart_artifact_name: capo-contrail-bgpaas
unit_templates:
- base-deps
depends_on:
heat-operator: \'{{ tuple . "heat-operator" | include "unit-enabled" }}\'
capo: \'{{ tuple . "capo" | include "unit-enabled" }}\'
helmrelease_spec:
chart:
spec:
chart: .
targetNamespace: capo-contrail-bgpaas-system
install:
createNamespace: true
values:
conf:
env:
DEFAULT_PORT: \'0\'
DEFAULT_ASN: \'64512\'
opennebula-cpi:
info:
description: configures OpenNebula Cloud controller manager
internal: true
unit_templates: []
depends_on:
cluster: \'{{ tuple . "cluster" | include "unit-enabled" }}\'
enabled_conditions:
- \'{{ eq .Values.cluster.capi_providers.infra_provider "capone" }}\'
helm_repo_url: https://opennebula.github.io/cloud-provider-opennebula/charts
helmrelease_spec:
chart:
spec:
chart: opennebula-cpi
version: "0.1.2"
storageNamespace: kube-system
targetNamespace: kube-system
values:
CCM_CTL: cloud-node,cloud-node-lifecycle
CLUSTER_NAME: "{{ .Values.cluster.name }}"
PUBLIC_NETWORK_NAME: "{{ .Values.cluster.capone.public_network }}"
PRIVATE_NETWORK_NAME: ""
ROUTER_TEMPLATE_NAME: ""
nodeSelector:
node-role.kubernetes.io/control-plane: \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" | ternary "true" "" }}\'
helm_secret_values:
ONE_XMLRPC: "{{ .Values.cluster.capone.ONE_XMLRPC }}"
ONE_AUTH: "{{ .Values.cluster.capone.ONE_AUTH }}"
vsphere-cpi:
info:
description: configures the vSphere cloud controller manager
internal: true
unit_templates: [] # the vsphere-cpi handles the rollout of control plane nodes so must run as soon as the cluster has been created
depends_on:
cluster: \'{{ tuple . "cluster" | include "unit-enabled" }}\'
enabled_conditions:
- \'{{ eq .Values.cluster.capi_providers.infra_provider "capv" }}\'
helm_repo_url: https://kubernetes.github.io/cloud-provider-vsphere
helm_chart_versions:
v1.30.0: \'{{ include "k8s-version-match" (tuple ">=1.30.0,<1.31.0" .Values._internal.k8s_version) }}\'
v1.31.0: \'{{ include "k8s-version-match" (tuple ">=1.31.0,<1.32.0" .Values._internal.k8s_version) }}\'
1.32.1: \'{{ include "k8s-version-match" (tuple ">=1.32.0" .Values._internal.k8s_version) }}\'
helmrelease_spec:
chart:
spec:
chart: vsphere-cpi
version: "" # defined by helm_chart_versions
releaseName: vsphere-cpi
storageNamespace: kube-system
targetNamespace: kube-system
values:
config:
enabled: true
vcenter: \'{{ .Values.cluster.capv.server }}\'
datacenter: \'{{ .Values.cluster.capv.dataCenter }}\'
thumbprint: \'{{ .Values.cluster.capv.tlsThumbprint }}\'
daemonset:
image: registry.k8s.io/cloud-pv-vsphere/cloud-provider-vsphere
_postRenderers:
- kustomize:
patches:
- patch: |-
kind: ConfigMap
apiVersion: v1
metadata:
name: vsphere-cloud-config
namespace: kube-system
data:
vsphere.conf: | {{ index (index .Values.vsphere "vsphere-cpi") "vsphere_conf" | toYaml | nindent 4 }}
helm_secret_values:
config:
username: \'{{ .Values.cluster.capv.username }}\'
password: \'{{ .Values.cluster.capv.password }}\'
vsphere-csi-driver:
info:
description: installs the vSphere CSI driver
maturity: stable
unit_templates:
- base-deps
enabled_conditions:
- \'{{ eq .Values.cluster.capi_providers.infra_provider "capv" }}\'
repo: sylva-core
kustomization_spec:
path: kustomize-units/vsphere-csi-driver
targetNamespace: vmware-system-csi
wait: true
postBuild:
substitute:
SERVER: \'{{ .Values.cluster.capv.server }}\'
DATACENTER: \'{{ .Values.cluster.capv.dataCenter }}\'
CLUSTER_ID: \'{{ printf "%s-%s" .Values.cluster.name (randAlphaNum 10) | trunc 64 }}\'
STORAGE_POLICY_NAME: \'{{ .Values.cluster.capv.storagePolicyName | default "" }}\'
CONTROLLER_REPLICAS: \'{{ .Values._internal.ha_cluster.is_ha | ternary 3 1 }}\'
kustomization_substitute_secrets:
USERNAME: \'{{ .Values.cluster.capv.username }}\'
PASSWORD: \'{{ .Values.cluster.capv.password }}\'
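# Illustrative note: both vsphere-cpi and vsphere-csi-driver read their vCenter
# connection settings from .Values.cluster.capv (sketch, all values are
# placeholders):
#   cluster:
#     capv:
#       server: vcenter.example.org
#       dataCenter: DC1
#       tlsThumbprint: "AA:BB:CC:..."
#       username: administrator@vsphere.local
#       password: replace-me
#       storagePolicyName: ""  # optional, defaults to "" above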
sandbox-privileged-namespace:
info:
description: >
creates the sandbox namespace used
to perform privileged operations like debugging a node.
It cannot be enabled when env_type=prod
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ or (.Values.env_type | eq "dev") (.Values.env_type | eq "ci") }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/sandbox-privileged-namespace
wait: true
prune: true
# Gitea-unit
gitea-secrets:
info:
description: >
creates random secrets that will be used by the Gitea application.
Secrets are synced with Vault.
internal: true
enabled_conditions:
- \'{{ tuple . "gitea" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
vault-config-operator: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/gitea/secrets
postBuild:
substitute:
K8S_AUTH_PATH: \'{{ .Values.security.vault.paths.k8s }}\'
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
VAULT_API: \'{{ .Values.security.vault.external_vault_url | include "set-if-defined" }}\'
wait: true
gitea-eso:
info:
description: >
writes secrets into the gitea namespace, in the format expected by Gitea
internal: true
enabled_conditions:
- \'{{ tuple . "gitea" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
eso-secret-stores: true
gitea-keycloak-resources: true
gitea-secrets: true
sylva-ca: true
external-secrets-operator: true
repo: sylva-core
kustomization_substitute_secrets:
KEY: \'{{ .Values.external_certificates.gitea.key | default "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/gitea/eso
wait: false
healthChecks:
- apiVersion: v1
kind: Secret
name: gitea-keycloak-oidc-auth
namespace: gitea
- apiVersion: v1
kind: Secret
name: gitea-admin
namespace: gitea
- apiVersion: v1
kind: Secret
name: gitea-postgres-secrets
namespace: gitea
- apiVersion: v1
kind: Secret
name: gitea-redis
namespace: gitea
- apiVersion: v1
kind: Secret
name: sylva-ca.crt
namespace: gitea
postBuild:
substitute:
SERVICE: gitea
SERVICE_DNS: \'{{ .Values.external_hostnames.gitea }}\'
CERT: \'{{ .Values.external_certificates.gitea.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
_components:
- \'{{ ternary "../../tls-components/tls-secret" "../../tls-components/tls-certificate" (hasKey .Values.external_certificates.gitea "cert") }}\'
- "../../tls-components/sylva-ca"
gitea-keycloak-resources:
info:
description: >
deploys the Gitea OIDC client in Sylva\'s Keycloak realm
internal: true
enabled_conditions:
- \'{{ tuple . "gitea" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
keycloak: true
keycloak-legacy-operator: true
keycloak-resources: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/gitea/keycloak-resources
targetNamespace: keycloak
postBuild:
substitute:
GITEA_DNS: \'{{ .Values.external_hostnames.gitea }}\'
healthChecks:
- apiVersion: v1
kind: Secret
name: keycloak-client-secret-gitea-client # this secret is a byproduct of the gitea-client KeycloakClient resource
namespace: keycloak
gitea-redis:
info:
description: installs Redis cluster for Gitea
maturity: stable
enabled_conditions:
- \'{{ tuple . "gitea" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
gitea-eso: true
gitea-keycloak-resources: true
gitea-secrets: true
\'{{ .Values._internal.default_storage_class_unit }}\': true
repo: bitnami-redis-cluster
helmrelease_spec:
chart:
spec:
chart: bitnami/redis-cluster
targetNamespace: gitea
releaseName: gitea-redis
values:
image:
repository: bitnamilegacy/redis-cluster
metrics:
image:
repository: bitnamilegacy/redis-exporter
sysctlImage:
repository: bitnamilegacy/os-shell
volumePermissions:
image:
repository: bitnamilegacy/os-shell
usePassword: true
existingSecret: gitea-redis
existingSecretPasswordKey: password
global:
storageClass: "{{ .Values._internal.default_storage_class }}"
persistence:
size: 8Gi
_postRenderers:
- kustomize:
patches:
- \'{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include "preserve-type" }}\'
gitea-postgresql-ha:
info:
description: installs PostgreSQL HA cluster for Gitea
maturity: stable
enabled_conditions:
- \'{{ tuple . "gitea" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
gitea-eso: true
gitea-keycloak-resources: true
gitea-secrets: true
\'{{ .Values._internal.default_storage_class_unit }}\': true
repo: bitnami-postgresql-ha
helmrelease_spec:
chart:
spec:
chart: bitnami/postgresql-ha
targetNamespace: gitea
releaseName: gitea-postgres
values:
global:
storageClass: "{{ .Values._internal.default_storage_class }}"
postgresql:
image:
repository: bitnamilegacy/postgresql-repmgr
username: gitea
database: gitea
existingSecret: gitea-postgres-secrets
pgpool:
image:
repository: bitnamilegacy/pgpool
existingSecret: gitea-postgres-secrets
persistence:
size: 8Gi
metrics:
image:
repository: bitnamilegacy/postgres-exporter
volumePermissions:
image:
repository: bitnamilegacy/os-shell
_postRenderers:
- kustomize:
patches:
- \'{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include "preserve-type" }}\'
gitea:
info:
description: installs Gitea
maturity: stable
enabled: false
unit_templates:
- base-deps
depends_on:
cert-manager: true
gitea-eso: true
gitea-keycloak-resources: true
gitea-secrets: true
gitea-redis: true
gitea-postgresql-ha: true
namespace-defs: true
\'{{ .Values._internal.default_storage_class_unit }}\': true
annotations:
sylvactl/readyMessage: "Gitea can be reached at https://{{ .Values.external_hostnames.gitea }} ({{ .Values._internal.display_external_ip_msg }})"
helm_repo_url: https://dl.gitea.com/charts/
helmrelease_spec:
chart:
spec:
chart: gitea
version: 11.0.1
targetNamespace: gitea
releaseName: gitea
values:
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
redis-cluster:
enabled: false
postgresql:
enabled: false
postgresql-ha:
enabled: false
persistence:
enabled: true
size: 10Gi
storageClass: "{{ .Values._internal.default_storage_class }}"
accessModes:
- "{{ .Values._internal.default_storage_class_access_mode_rwx }}"
replicaCount: >-
{{- if eq .Values._internal.default_storage_class_RWX_support "true" -}}
{{ 3 | include "preserve-type" }}
{{- else -}}
{{ 1 | include "preserve-type" }}
{{- end -}}
strategy:
type: \'{{ eq (tuple . .Values._internal.default_storage_class_RWX_support | include "interpret-as-string") "true" | ternary "RollingUpdate" "Recreate" }}\'
actions:
provisioning:
publish:
repository: alpine/kubectl
tag: 1.34.1
gitea:
admin:
existingSecret: gitea-admin
metrics:
enabled: true
serviceMonitor:
enabled: \'{{ .Values._internal.monitoring.enabled | include "preserve-type" }}\'
oauth:
- name: "keycloak-sylva"
provider: "openidConnect"
existingSecret: gitea-keycloak-oidc-auth
autoDiscoverUrl: \'https://{{ .Values.external_hostnames.keycloak }}/realms/sylva/.well-known/openid-configuration\'
config:
cron:
ENABLED: false
cron.GIT_GC_REPOS:
ENABLED: false
server:
ENABLE_PPROF: true
database:
DB_TYPE: postgres
HOST: gitea-postgres-postgresql-ha-pgpool.gitea.svc.cluster.local:5432
NAME: gitea
USER: gitea
# defined by env variable: PASSWD
SCHEMA: public
session:
PROVIDER: redis
# defined by env variable: PROVIDER_CONFIG
cache:
ADAPTER: redis
# defined by env variable: HOST
queue:
TYPE: redis
# defined by env variable: CONN_STR
indexer:
REPO_INDEXER_ENABLED: false
ISSUE_INDEXER_ENABLED: false
additionalConfigFromEnvs:
- name: GITEA__DATABASE__PASSWD # define DB password
valueFrom:
secretKeyRef:
key: password
name: gitea-postgres-secrets
- name: GITEA__QUEUE__CONN_STR # redis connection string for queue
valueFrom:
secretKeyRef:
key: connection_string
name: gitea-redis
- name: GITEA__SESSION__PROVIDER_CONFIG # redis connection string for session
valueFrom:
secretKeyRef:
key: connection_string
name: gitea-redis
- name: GITEA__CACHE__HOST # redis connection string for cache
valueFrom:
secretKeyRef:
key: connection_string
name: gitea-redis
ingress:
enabled: true
className: nginx
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: 8m
tls:
- hosts:
- \'{{ .Values.external_hostnames.gitea }}\'
secretName: gitea-tls
hosts:
- host: \'{{ .Values.external_hostnames.gitea }}\'
paths:
- path: /
pathType: Prefix
extraVolumes:
- secret:
defaultMode: 420
secretName: sylva-ca.crt
name: sylva-ca
extraVolumeMounts:
- mountPath: /etc/ssl/certs/sylva-ca.crt
name: sylva-ca
readOnly: true
subPath: ca.crt
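# Illustrative note: gitea-eso above switches between the tls-secret and
# tls-certificate components depending on whether a certificate is provided in
# .Values.external_certificates.gitea. Providing one could look like the sketch
# below (PEM contents are placeholders):
#   external_certificates:
#     cacert: |
#       -----BEGIN CERTIFICATE-----
#       ...
#     gitea:
#       cert: |
#         -----BEGIN CERTIFICATE-----
#         ...
#       key: |
#         -----BEGIN PRIVATE KEY-----
#         ...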
# Kunai-unit
kunai-secrets:
info:
description: >
creates random secrets that will be used by the Kunai application.
Secrets are synced with Vault.
internal: true
enabled_conditions:
- \'{{ tuple . "kunai" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
vault-config-operator: true
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/kunai/secrets
wait: true
postBuild:
substitute:
K8S_AUTH_PATH: \'{{ .Values.security.vault.paths.k8s }}\'
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
VAULT_API: \'{{ .Values.security.vault.external_vault_url | include "set-if-defined" }}\'
kunai-eso:
info:
description: >
writes secrets into the kunai namespace, in the format expected by Kunai
internal: true
enabled_conditions:
- \'{{ tuple . "kunai" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
sylva-ca: true
external-secrets-operator: true
eso-secret-stores: true
keycloak-resources: true
kunai-secrets: true
repo: sylva-core
kustomization_substitute_secrets:
KEY: \'{{ .Values.external_certificates.kunai.key | default "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/kunai/eso
wait: false
healthChecks:
- apiVersion: v1
kind: Secret
name: kunai-next-oidc
namespace: kunai
- apiVersion: v1
kind: Secret
name: kunai-next-auth
namespace: kunai
- apiVersion: v1
kind: Secret
name: sylva-ca.crt
namespace: kunai
postBuild:
substitute:
SERVICE: kunai
SERVICE_DNS: \'{{ .Values.external_hostnames.kunai }}\'
CERT: \'{{ .Values.external_certificates.kunai.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
KEYCLOAK_DNS: \'{{ .Values.external_hostnames.keycloak }}\'
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
_components:
- \'{{ ternary "../../tls-components/tls-secret" "../../tls-components/tls-certificate" (hasKey .Values.external_certificates.kunai "cert") }}\'
- "../../tls-components/sylva-ca"
kunai-postgres-cnpg:
info:
description: Deploy Postgres cluster for Kunai using Cloud Native PostgreSQL (CNPG)
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "kunai" | include "unit-enabled" }}\'
repo: sylva-core
depends_on:
cnpg-operator: true
namespace-defs: true
kunai-eso: true
kunai-secrets: true
\'{{ .Values._internal.default_storage_class_unit }}\': true
kustomization_spec:
postBuild:
substitute:
KUNAI_POSTGRES_REPLICAS: \'{{ .Values._internal.ha_cluster.is_ha | ternary 3 1 }}\'
storageClass: \'{{ .Values._internal.default_storage_class }}\'
podAntiAffinityType: \'{{ .Values._internal.ha_cluster.is_ha | ternary "required" "preferred" }}\'
path: ./kustomize-units/kunai-postgres-cnpg
wait: true
kunai:
info:
description: installs Kunai
details: |-
The integration of [Kunai](https://gitlab.com/sylva-projects/sylva-elements/kunai) at this stage should be considered experimental.
Work is in progress to align its integration with workload-cluster-operator and workload-teams-defs.
See https://gitlab.com/groups/sylva-projects/-/epics/58.
maturity: experimental
enabled: false
unit_templates:
- base-deps
depends_on:
cert-manager: true
keycloak-resources: true
kunai-eso: true
kunai-secrets: true
kunai-postgres-cnpg: true
namespace-defs: true
annotations:
sylvactl/readyMessage: "Kunai UI can be reached at https://{{ .Values.external_hostnames.kunai }} ({{ .Values.external_hostnames.kunai }} must resolve to {{ .Values.display_external_ip }})"
helm_repo_url: https://gitlab.com/api/v4/projects/sylva-projects%2Fsylva-elements%2Fkunai/packages/helm/stable
helmrelease_spec:
chart:
spec:
chart: kunai
version: 2.1.2
targetNamespace: kunai
values:
postgresql:
enabled: false
externalDatabase:
existingSecret: cnpg-kunai-app
persistence:
enabled: false
kunai:
nextAuth:
enabled: true
existingsecretname: kunai-next-auth
nextOidc:
existingsecretname: kunai-next-oidc
extraCaCert:
enabled: true
existingsecretname: sylva-ca.crt
existingsecretkey: ca.crt
proxies:
http_proxy: \'{{ .Values.proxies.http_proxy }}\'
https_proxy: \'{{ .Values.proxies.https_proxy }}\'
no_proxy: \'{{ include "sylva-units.no_proxy" (tuple .) }}\'
ingress:
enabled: true
ingressClassName: nginx
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: 8m
nginx.ingress.kubernetes.io/large-client-header-buffers: "4 16k"
nginx.ingress.kubernetes.io/proxy-buffer-size: "256k"
tls: false
extraTls:
- hosts:
- \'{{ .Values.external_hostnames.kunai }}\'
secretName: kunai-tls
hostname: \'{{ .Values.external_hostnames.kunai }}\'
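# Illustrative note: Kunai is experimental and disabled by default; it can be
# enabled from a user values file (sketch):
#   units:
#     kunai:
#       enabled: true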
minio-operator-init:
info:
description: sets up the MinIO certificate for minio-operator
details: it generates the certificate for the MinIO operator console
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "minio-operator" | include "unit-enabled" }}\'
depends_on:
namespace-defs: true
sylva-ca: true
external-secrets-operator: true
repo: sylva-core
kustomization_substitute_secrets:
KEY: \'{{ .Values.external_certificates.minio_operator.key | default "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/minio-operator-init
wait: true
postBuild:
substitute:
SERVICE: minio-operator-console
SERVICE_DNS: \'{{ .Values.external_hostnames.minio_operator_console }}\'
CERTIFICATE_NAMESPACE: minio-operator
CERT: \'{{ .Values.external_certificates.minio_operator.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
CACERT_SECRET_NAME: operator-ca-tls
_components:
- \'{{ ternary "../tls-components/tls-secret" "../tls-components/tls-certificate" (hasKey .Values.external_certificates.minio_operator "cert") }}\'
- "../tls-components/sylva-ca"
- "../kyverno-policies/minio-policies/minio-operator-policies"
minio-operator:
info:
description: installs the MinIO operator
details: the MinIO operator is used to manage multiple S3 tenants
maturity: beta
unit_templates:
- base-deps
enabled_conditions:
- \'{{ or (tuple . "minio-monitoring" | include "unit-enabled") (tuple . "minio-logging" | include "unit-enabled") }}\'
depends_on:
namespace-defs: true
minio-operator-init: true
ingress-nginx: \'{{ tuple . "ingress-nginx" | include "unit-enabled" }}\'
annotations:
sylvactl/readyMessage: "minio operator console can be reached at https://{{ .Values.external_hostnames.minio_operator_console }} ({{ .Values._internal.display_external_ip_msg }})"
repo: minio-operator
helmrelease_spec:
targetNamespace: minio-operator
chart:
spec:
chart: helm/operator
install:
createNamespace: false
values:
operator:
env:
- name: OPERATOR_STS_AUTO_TLS_ENABLED
value: "off"
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
resources:
requests:
cpu: 100m
memory: 128Mi
ephemeral-storage: 500Mi
limits:
cpu: 200m
memory: 256Mi
replicaCount: 1
console:
enabled: true
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
ingress:
enabled: true
ingressClassName: nginx
host: \'{{ .Values.external_hostnames.minio_operator_console }}\'
path: /
pathType: Prefix
tls:
- hosts:
- \'{{ .Values.external_hostnames.minio_operator_console }}\'
secretName: minio-operator-console-tls
_postRenderers:
- kustomize:
patches:
- target:
kind: ClusterRole
name: minio-operator-role
patch: |
- op: add
path: /rules/-
value:
apiGroups:
- job.min.io
resources:
- \'*\'
verbs:
- \'*\'
- target:
kind: Deployment
name: minio-operator
patch: |
kind: _unused_
metadata:
name: _unused_
spec:
strategy:
rollingUpdate:
maxSurge: 0
template:
spec:
containers:
- name: operator
livenessProbe:
initialDelaySeconds: 60
exec:
command:
- bash
- -c
- "exec 3<>/dev/tcp/localhost/4221"
kustomization_spec:
healthChecks:
- apiVersion: v1
kind: Secret
name: sts-tls
namespace: minio-operator
minio-logging-init:
info:
description: sets up secrets and certificates for minio-logging
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "minio-logging" | include "unit-enabled" }}\'
depends_on:
namespace-defs: true
sylva-ca: true
external-secrets-operator: true
vault: true
vault-config-operator: true
eso-secret-stores: true
repo: sylva-core
kustomization_substitute_secrets:
KEY: \'{{ .Values.external_certificates.minio_logging.key | default "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/minio-tenant-init
wait: true
postBuild:
substitute:
TENANT_NS: minio-logging
TENANT_NAME: logging
CLUSTER_DOMAIN: \'{{ .Values.cluster_domain }}\'
SERVICE: minio-logging
SERVICE_DNS: \'{{ .Values.external_hostnames.minio_logging }}\'
CERTIFICATE_NAMESPACE: minio-logging
CERT: \'{{ .Values.external_certificates.minio_logging.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
K8S_AUTH_PATH: \'{{ .Values.security.vault.paths.k8s }}\'
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
VAULT_API: \'{{ .Values.security.vault.external_vault_url | include "set-if-defined" }}\'
_components:
- \'{{ ternary "../tls-components/tls-secret" "../tls-components/tls-certificate" (hasKey .Values.external_certificates.minio_logging "cert") }}\'
- "../tls-components/sylva-ca"
- "../kyverno-policies/minio-policies/minio-tenant-policies"
minio-logging:
info:
description: creates a MinIO tenant for the logging stack, used as S3 storage by Loki
maturity: beta
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "loki" | include "unit-enabled" }}\'
depends_on:
namespace-defs: true
minio-operator: true
minio-logging-init: true
ingress-nginx: \'{{ tuple . "ingress-nginx" | include "unit-enabled" }}\'
two-replicas-storageclass: \'{{ tuple . "two-replicas-storageclass" | include "unit-enabled" }}\'
\'{{ .Values._internal.default_storage_class_unit }}\': \'{{ not (tuple . "two-replicas-storageclass" | include "unit-enabled") }}\'
annotations:
sylvactl/readyMessage: "MinIO logging tenant console can be reached at https://{{ .Values.external_hostnames.minio_logging_console }} ({{ .Values._internal.display_external_ip_msg }})"
repo: minio-operator
helmrelease_spec:
targetNamespace: minio-logging
chart:
spec:
chart: helm/tenant
install:
createNamespace: false
values:
tenant:
name: logging
serviceMetadata:
minioServiceLabels:
v1.min.io/tenant: logging
configSecret:
existingSecret: true
name: minio-logging-root
accessKey: ""
secretKey: ""
# Use the certificate generated by cert-manager in minio-logging-init instead of relying on the minio operator
certificate:
requestAutoCert: false
externalCertSecret:
- name: minio-logging-internal-tls
type: kubernetes.io/tls
# The Kubernetes secret name that contains MinIO environment variable configurations:
configuration:
name: minio-logging-root
# Array of Kubernetes secrets from which the Operator generates MinIO users during tenant provisioning:
users:
- name: minio-logging-user
pools:
- servers: \'{{ min .Values._internal.node_count 4 }}\'
name: pool-0
volumesPerServer: 4
size: 3Gi
storageClassName: \'{{ tuple . (tuple . "two-replicas-storageclass" | include "unit-enabled") "two-replicas-storageclass" .Values._internal.default_storage_class | include "interpret-ternary" }}\'
containerSecurityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: "OnRootMismatch"
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
resources:
limits:
cpu: 500m
memory: 1024Mi
requests:
cpu: 100m
memory: 512Mi
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: v1.min.io/pool
operator: In
values:
- pool-0
- key: v1.min.io/tenant
operator: In
values:
- logging
topologyKey: "kubernetes.io/hostname"
metrics:
enabled: true
env:
- name: MINIO_PROMETHEUS_AUTH_TYPE
value: public
features:
bucketDNS: false
enableSFTP: false
buckets:
- name: "loki-chunks"
region: "logging-cluster"
objectLock: false
- name: "loki-ruler"
region: "monitoring-cluster"
objectLock: false
- name: "loki-admin"
region: "monitoring-cluster"
objectLock: false
prometheusOperator: false # Prometheus Operator\'s Service Monitor for MinIO Tenant Pods
logging:
anonymous: true
json: true
quiet: true
ingress:
api:
enabled: true
ingressClassName: nginx
annotations:
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
host: \'{{ .Values.external_hostnames.minio_logging }}\'
path: /
pathType: Prefix
tls:
- hosts:
- \'{{ .Values.external_hostnames.minio_logging }}\'
secretName: minio-logging-tls
console:
enabled: true
ingressClassName: nginx
annotations:
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
host: \'{{ .Values.external_hostnames.minio_logging_console }}\'
path: /
pathType: Prefix
tls:
- hosts:
- \'{{ .Values.external_hostnames.minio_logging_console }}\'
secretName: minio-logging-tls
kustomization_spec:
healthChecks:
- apiVersion: minio.min.io/v2
kind: Tenant
name: logging
namespace: minio-logging
healthCheckExprs: >-
{{
tuple (list (dict
"apiVersion" "minio.min.io/v2"
"kind" "Tenant"
"current" "status.healthStatus == \\"green\\""
"failed" "status.healthStatus != \\"green\\""
)
)
(lookup
"apiextensions.k8s.io/v1"
"CustomResourceDefinition"
""
"kustomizations.kustomize.toolkit.fluxcd.io"
| toYaml
| contains "HealthCheckExprs"
)
| include "set-only-if"
}}
velero:
info:
description: Deploys Velero and its dependencies
maturity: beta
depends_on:
namespace-defs: true
enabled: false
unit_templates:
- base-deps
helm_repo_url: https://vmware-tanzu.github.io/helm-charts
helmrelease_spec:
chart:
spec:
chart: velero
version: 11.0.0
targetNamespace: velero
install:
createNamespace: false
values:
installCRDs: true
snapshotsEnabled: false
backupsEnabled: true
deployNodeAgent: false
configuration:
backupStorageLocation: []
volumeSnapshotLocation: []
resources:
requests:
cpu: 500m
memory: 128Mi
limits:
cpu: 1000m
memory: 512Mi
kubectl:
image:
repository: alpine/kubectl
tag: 1.34.1
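# Illustrative note: Velero is disabled by default and ships with an empty
# backupStorageLocation list. A user values file would enable the unit and add
# at least one location using the upstream velero chart schema (sketch, all
# location fields are placeholders):
#   units:
#     velero:
#       enabled: true
#       helmrelease_spec:
#         values:
#           configuration:
#             backupStorageLocation:
#               - name: default
#                 provider: aws
#                 bucket: velero-backups
#                 config:
#                   region: main
#                   s3Url: https://s3.example.org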
minio-monitoring-init:
info:
description: sets up secrets and certificates for minio-monitoring
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "minio-monitoring" | include "unit-enabled" }}\'
depends_on:
namespace-defs: true
sylva-ca: true
external-secrets-operator: true
vault: true
vault-config-operator: true
eso-secret-stores: true
repo: sylva-core
kustomization_substitute_secrets:
KEY: \'{{ .Values.external_certificates.minio_monitoring.key | default "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/minio-tenant-init
wait: true
postBuild:
substitute:
TENANT_NS: minio-monitoring
TENANT_NAME: monitoring
CLUSTER_DOMAIN: \'{{ .Values.cluster_domain }}\'
SERVICE: minio-monitoring
SERVICE_DNS: \'{{ .Values.external_hostnames.minio_monitoring }}\'
CERTIFICATE_NAMESPACE: minio-monitoring
CERT: \'{{ .Values.external_certificates.minio_monitoring.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
K8S_AUTH_PATH: \'{{ .Values.security.vault.paths.k8s }}\'
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
VAULT_API: \'{{ .Values.security.vault.external_vault_url | include "set-if-defined" }}\'
_components:
- \'{{ ternary "../tls-components/tls-secret" "../tls-components/tls-certificate" (hasKey .Values.external_certificates.minio_monitoring "cert") }}\'
- "../tls-components/sylva-ca"
- "../kyverno-policies/minio-policies/minio-tenant-policies"
minio-monitoring:
info:
description: creates a MinIO tenant for the monitoring stack, used as S3 storage by Thanos
maturity: beta
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "thanos" | include "unit-enabled" }}\'
# only enable when no custom objstoreConfig is provided
- \'{{ .Values.monitoring.thanos.objstoreConfig.config | empty }}\'
depends_on:
namespace-defs: true
minio-operator: true
minio-monitoring-init: true
ingress-nginx: \'{{ tuple . "ingress-nginx" | include "unit-enabled" }}\'
two-replicas-storageclass: \'{{ tuple . "two-replicas-storageclass" | include "unit-enabled" }}\'
\'{{ .Values._internal.default_storage_class_unit }}\': \'{{ not (tuple . "two-replicas-storageclass" | include "unit-enabled") }}\'
annotations:
sylvactl/readyMessage: "MinIO monitoring tenant console can be reached at https://{{ .Values.external_hostnames.minio_monitoring_console }} ({{ .Values._internal.display_external_ip_msg }})"
repo: minio-operator
helmrelease_spec:
targetNamespace: minio-monitoring
chart:
spec:
chart: helm/tenant
install:
createNamespace: false
values:
tenant:
name: monitoring
serviceMetadata:
minioServiceLabels:
v1.min.io/tenant: monitoring
configSecret:
existingSecret: true
name: minio-monitoring-root
accessKey: ""
secretKey: ""
# Use the certificate generated by cert-manager in minio-monitoring-init instead of relying on the minio operator
certificate:
requestAutoCert: false
externalCertSecret:
- name: minio-monitoring-internal-tls
type: kubernetes.io/tls
# The Kubernetes secret name that contains MinIO environment variable configurations:
configuration:
name: minio-monitoring-root
# Array of Kubernetes secrets from which the Operator generates MinIO users during tenant provisioning:
users:
- name: minio-monitoring-user
pools:
- servers: \'{{ min .Values._internal.node_count 4 }}\'
name: pool-0
volumesPerServer: 4
size: 10Gi
storageClassName: \'{{ tuple . (tuple . "two-replicas-storageclass" | include "unit-enabled") "two-replicas-storageclass" .Values._internal.default_storage_class | include "interpret-ternary" }}\'
containerSecurityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: "OnRootMismatch"
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
resources:
limits:
cpu: 500m
memory: \'{{ has .Values.env_type (list "dev" "ci") | ternary "512Mi" "2Gi" }}\'
requests:
cpu: 100m
memory: \'{{ has .Values.env_type (list "dev" "ci") | ternary "128Mi" "1Gi" }}\'
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: v1.min.io/pool
operator: In
values:
- pool-0
- key: v1.min.io/tenant
operator: In
values:
- monitoring
topologyKey: "kubernetes.io/hostname"
metrics:
enabled: true
env:
- name: MINIO_PROMETHEUS_AUTH_TYPE
value: public
features:
bucketDNS: false
enableSFTP: false
buckets:
- name: "thanos"
region: "monitoring-cluster"
objectLock: false
prometheusOperator: false # Prometheus Operator\'s Service Monitor for MinIO Tenant Pods
logging:
anonymous: true
json: true
quiet: true
ingress:
api:
enabled: true
ingressClassName: nginx
annotations:
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
host: \'{{ .Values.external_hostnames.minio_monitoring }}\'
path: /
pathType: Prefix
tls:
- hosts:
- \'{{ .Values.external_hostnames.minio_monitoring }}\'
secretName: minio-monitoring-tls
console:
enabled: true
ingressClassName: nginx
annotations:
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
host: \'{{ .Values.external_hostnames.minio_monitoring_console }}\'
path: /
pathType: Prefix
tls:
- hosts:
- \'{{ .Values.external_hostnames.minio_monitoring_console }}\'
secretName: minio-monitoring-tls
kustomization_spec:
healthChecks:
- apiVersion: minio.min.io/v2
kind: Tenant
name: monitoring
namespace: minio-monitoring
healthCheckExprs: >-
{{
tuple (list (dict
"apiVersion" "minio.min.io/v2"
"kind" "Tenant"
"current" "status.healthStatus == \\"green\\""
"failed" "status.healthStatus != \\"green\\""
)
)
(lookup
"apiextensions.k8s.io/v1"
"CustomResourceDefinition"
""
"kustomizations.kustomize.toolkit.fluxcd.io"
| toYaml
| contains "HealthCheckExprs"
)
| include "set-only-if"
}}
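# for reference, when the Flux Kustomization CRD supports healthCheckExprs the expression above
# renders to something like:
#   healthCheckExprs:
#     - apiVersion: minio.min.io/v2
#       kind: Tenant
#       current: status.healthStatus == "green"
#       failed: status.healthStatus != "green"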
thanos-init:
info:
description: sets up the Thanos certificate
details: it generates a multi-CN certificate covering all Thanos components
internal: true
enabled_conditions:
- \'{{ tuple . "thanos" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
sylva-ca: true
external-secrets-operator: true
eso-secret-stores: true
minio-monitoring-init: \'{{ .Values.monitoring.thanos.objstoreConfig.config | empty }}\'
repo: sylva-core
kustomization_spec:
path: \'{{ if .Values.monitoring.thanos.objstoreConfig.config | empty }}./kustomize-units/thanos-init{{ else }}./kustomize-units/thanos-dummy{{ end }}\'
wait: true
postBuild:
substitute:
SERVICE: thanos
SERVICE_DNS: \'{{ .Values.external_hostnames.thanos }}\'
CERTIFICATE_NAMESPACE: thanos
THANOS_RECEIVE_DNS: \'{{ .Values.external_hostnames.thanos_receive }}\'
THANOS_STOREGATEWAY_DNS: \'{{ .Values.external_hostnames.thanos_storegateway }}\'
THANOS_QUERY_DNS: \'{{ .Values.external_hostnames.thanos_query }}\'
THANOS_BUCKETWEB_DNS: \'{{ .Values.external_hostnames.thanos_bucketweb }}\'
CERT: \'{{ .Values.external_certificates.thanos.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
_patches:
- target:
kind: Certificate
patch: |
- op: add
path: /spec/dnsNames/-
value: ${THANOS_RECEIVE_DNS}
- op: add
path: /spec/dnsNames/-
value: ${THANOS_STOREGATEWAY_DNS}
- op: add
path: /spec/dnsNames/-
value: ${THANOS_QUERY_DNS}
- op: add
path: /spec/dnsNames/-
value: ${THANOS_BUCKETWEB_DNS}
_components:
- \'{{ ternary "../tls-components/tls-secret" "../tls-components/tls-certificate" (hasKey .Values.external_certificates.thanos "cert") }}\'
- "../tls-components/sylva-ca"
thanos-credentials-secret:
info:
description: creates a secret containing the tenant\'s Thanos credentials
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "thanos" | include "unit-enabled" }}\'
depends_on:
kyverno-policies-ready: true
repo: sylva-core
kustomization_substitute_secrets:
PASSWORD: \'{{ .Values._internal.thanos_password }}\'
kustomization_spec:
targetNamespace: \'{{ .Release.Namespace }}\'
path: ./kustomize-units/credentials-secret
postBuild:
substitute:
LABEL_NAME: sylva.io/aggregated-secret
LABEL_VALUE: thanos
SECRET_NAME: thanos-credentials
USERNAME: \'{{ .Values._internal.thanos_user }}\'
healthChecks: # ensure that kyverno has produced the secret
- apiVersion: v1
kind: Secret
namespace: sylva-system
name: thanos-secrets
thanos-statefulsets-cleanup:
info:
description: Remove Thanos statefulsets from previous deployments
internal: true
unit_templates:
- base-deps
- kube-job
enabled_conditions:
- \'{{ tuple . "thanos" | include "unit-enabled" }}\'
depends_on:
namespace-defs: true
thanos-init: true
thanos-credentials-secret: true
kustomization_spec:
targetNamespace: thanos
_patches:
- \'{{ tuple . "units.thanos.helmrelease_spec.values" | include "interpret" -}}
{{ include "kube-job-add-env-var-patch" (dict
"EXPECTED_HASH_receive" (.Values.units.thanos.helmrelease_spec.values.receive.persistence | toJson | sha256sum | trunc 8)
"EXPECTED_HASH_storegateway" (.Values.units.thanos.helmrelease_spec.values.storegateway.persistence | toJson | sha256sum | trunc 8)
"EXPECTED_HASH_ruler" (.Values.units.thanos.helmrelease_spec.values.ruler.persistence | toJson | sha256sum | trunc 8)
)}}\'
- \'{{ include "kube-job-replace-script-patch" (.Files.Get "scripts/thanos-delete-statefulsets.sh") }}\'
thanos:
info:
description: installs Thanos
maturity: beta
enabled_conditions:
- \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
thanos-init: true
thanos-credentials-secret: true
thanos-statefulsets-cleanup: \'{{ tuple . "thanos-statefulsets-cleanup" | include "unit-enabled" }}\'
minio-monitoring: \'{{ .Values.monitoring.thanos.objstoreConfig.config | empty }}\'
\'{{ .Values._internal.default_storage_class_unit }}\': true
ingress-nginx: \'{{ tuple . "ingress-nginx" | include "unit-enabled" }}\'
annotations:
sylvactl/readyMessage: "Thanos UIs can be reached at https://{{ .Values.external_hostnames.thanos }} and https://{{ .Values.external_hostnames.thanos_bucketweb }} ({{ .Values.external_hostnames.thanos }} and {{ .Values.external_hostnames.thanos_bucketweb }} must resolve to {{ .Values.display_external_ip }})"
repo: bitnami-thanos
helmrelease_spec:
driftDetection:
ignore:
- target:
# The thanos-compactor replica count is managed by Kyverno policies (kustomize-units/kyverno-policies/generic/components/thanos),
# hence drift detection should ignore it.
kind: Deployment
name: thanos-compactor
paths:
- /spec/replicas
targetNamespace: thanos
install:
createNamespace: false
chart:
spec:
chart: bitnami/thanos
values:
image:
registry: quay.io
repository: thanos/thanos
tag: v0.39.2
volumePermissions:
image:
repository: bitnamilegacy/os-shell
fullnameOverride: "thanos"
metrics:
enabled: true
serviceMonitor:
enabled: true
extraParameters:
basicAuth:
password:
name: thanos-basic-auth
key: password
username:
name: thanos-basic-auth
key: username
ruler:
enabled: true
resources:
requests:
cpu: 0.1
memory: 128Mi
limits:
cpu: 0.5
memory: 512Mi
dnsDiscovery:
enabled: false
extraFlags:
- --query.config-file=/etc/thanos/ruler-queries.yaml
- --label=platform_tag="{{ .Values.monitoring.platform_tag }}"
- --enable-auto-gomemlimit
- --tsdb.retention=12h
- --tsdb.wal-compression
extraVolumes:
- name: thanos-ruler-queries-config
secret:
secretName: thanos-ruler-queries
extraVolumeMounts:
- name: thanos-ruler-queries-config
mountPath: /etc/thanos/ruler-queries.yaml
subPath: ruler-queries.yaml
alertmanagersConfig: |
alertmanagers:
- scheme: http
api_version: v2
timeout: 10s
static_configs:
{{- $count_am := .Values.units.monitoring.helmrelease_spec.values.alertmanager.alertmanagerSpec.replicas -}}
{{- if kindIs "string" $count_am -}}
{{- $count_am = tuple . $count_am | include "interpret-as-string" -}}
{{- end -}}
{{- $count_am = int $count_am -}}
{{- range $index_am := until $count_am }}
- alertmanager-rancher-monitoring-alertmanager-{{ $index_am }}.alertmanager-operated.cattle-monitoring-system:9093
{{- end -}}
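# (for illustration: with 2 Alertmanager replicas the loop above renders two static_configs
# entries, alertmanager-rancher-monitoring-alertmanager-0 and -1, both targeting port 9093
# of the alertmanager-operated service in cattle-monitoring-system)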
existingConfigmap: "sylva-thanos-rules-configmap" # this ConfigMap is a byproduct of the sylva-thanos-rules unit
sidecars:
# reload thanos-ruler when changes to rule files or alertmanager
# list are detected
- name: configmap-reload
image: bitnamilegacy/configmap-reload:0.15.0
args:
- --volume-dir=/conf/rules/
- --volume-dir=/conf/alertmanagers/
- --webhook-url=http://{{ .Values._internal.thanos_user }}:{{ .Values._internal.thanos_password }}@localhost:10902/-/reload
volumeMounts:
# volume from "existingConfigmap"
- name: ruler-config
mountPath: /conf/rules
- name: alertmanagers-config
mountPath: /conf/alertmanagers
volumes:
- name: alertmanagers-config
secret:
secretName: thanos-ruler-alertmanagers-config
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
resources:
requests:
memory: 64Mi
cpu: 100m
limits:
memory: 128Mi
cpu: 250m
persistence:
enabled: true
storageClass: \'{{ tuple . (tuple . "two-replicas-storageclass" | include "unit-enabled") "two-replicas-storageclass" .Values._internal.default_storage_class | include "interpret-ternary" }}\'
accessModes: ["ReadWriteOnce"]
size: \'{{ .Values.monitoring.thanos.ruler.persistence.size }}\'
evalInterval: 1m
replicaCount: \'{{ .Values._internal.ha_cluster.is_ha | ternary 2 1 | include "preserve-type" }}\'
pdb:
create: true
minAvailable: \'{{ .Values._internal.ha_cluster.is_ha | ternary 1 0 | include "preserve-type" }}\'
service:
clusterIP: None
query:
logLevel: info
extraFlags:
- --query.auto-downsampling
- --query.replica-label=prometheus_replica
- --query.replica-label=prometheus
- --enable-auto-gomemlimit
- --query.max-concurrent=20
- --query.max-concurrent-select=4
resources:
requests:
cpu: 0.1
memory: 256Mi
limits:
cpu: 0.5
memory: 512Mi
ingress:
enabled: true
ingressClassName: "nginx"
hostname: \'{{ .Values.external_hostnames.thanos_query }}\'
extraTls:
- hosts:
- \'{{ .Values.external_hostnames.thanos_query }}\'
secretName: thanos-tls
queryFrontend:
logLevel: info
extraFlags:
- --query-frontend.forward-header=Authorization
- --cache-compression-type=snappy
- --query-range.split-interval=6h
- --enable-auto-gomemlimit
resources:
requests:
cpu: 0.1
memory: 512Mi
limits:
cpu: 0.5
memory: 1.5Gi
config: |-
type: IN-MEMORY
config:
max_size: 1GB
max_size_items: 0
validity: 0s
ingress:
enabled: true
ingressClassName: "nginx"
hostname: \'{{ .Values.external_hostnames.thanos }}\'
extraTls:
- hosts:
- \'{{ .Values.external_hostnames.thanos }}\'
secretName: thanos-tls
storegateway:
enabled: true
logLevel: info
extraFlags:
- --index-cache-size=1.5GB
- --enable-auto-gomemlimit
- --sync-block-duration=3m
persistence:
enabled: true
storageClass: \'{{ tuple . (tuple . "two-replicas-storageclass" | include "unit-enabled") "two-replicas-storageclass" .Values._internal.default_storage_class | include "interpret-ternary" }}\'
accessModes: ["ReadWriteOnce"]
size: \'{{ .Values.monitoring.thanos.storegateway.persistence.size }}\'
ingress:
enabled: true
hostname: \'{{ .Values.external_hostnames.thanos_storegateway }}\'
ingressClassName: "nginx"
extraTls:
- hosts:
- \'{{ .Values.external_hostnames.thanos_storegateway }}\'
secretName: thanos-tls
resources:
requests:
cpu: 0.1
memory: 1.5Gi
limits:
cpu: 0.5
memory: 2Gi
compactor:
persistence:
enabled: true
storageClass: \'{{ tuple . (tuple . "two-replicas-storageclass" | include "unit-enabled") "two-replicas-storageclass" .Values._internal.default_storage_class | include "interpret-ternary" }}\'
accessModes: ["ReadWriteOnce"]
size: \'{{ .Values.monitoring.thanos.compactor.persistence.size }}\'
enabled: true
logLevel: info
extraFlags:
- --deduplication.replica-label="prometheus_replica"
- --delete-delay=12h
consistencyDelay: 30m
resources:
requests:
cpu: 0.5
memory: 512Mi
limits:
cpu: 1
memory: 1024Mi
retentionResolutionRaw: 2d
retentionResolution5m: 15d
retentionResolution1h: 0d # disable 1h resolution retention
receive:
enabled: true
logLevel: info
resources:
requests:
cpu: 0.5
memory: 4Gi
limits:
cpu: 1
memory: 6Gi
persistence:
enabled: true
storageClass: \'{{ tuple . (tuple . "two-replicas-storageclass" | include "unit-enabled") "two-replicas-storageclass" .Values._internal.default_storage_class | include "interpret-ternary" }}\'
accessModes: ["ReadWriteOnce"]
size: \'{{ .Values.monitoring.thanos.receive.persistence.size }}\'
tsdbRetention: 12h
extraFlags:
- --enable-auto-gomemlimit
- --tsdb.max-retention-bytes=12GB
- --tsdb.wal-compression
- --tsdb.min-block-duration=1h # flush to object storage faster
- --tsdb.max-block-duration=1h # flush to object storage faster
ingress:
enabled: true
ingressClassName: "nginx"
annotations:
nginx.ingress.kubernetes.io/client-body-buffer-size: 1m
hostname: \'{{ .Values.external_hostnames.thanos_receive }}\'
extraTls:
- hosts:
- \'{{ .Values.external_hostnames.thanos_receive }}\'
secretName: thanos-tls
bucketweb:
enabled: true
logLevel: info
resources:
requests:
cpu: 0.1
memory: 128Mi
limits:
cpu: 0.5
memory: 256Mi
ingress:
enabled: true
hostname: \'{{ .Values.external_hostnames.thanos_bucketweb }}\'
ingressClassName: "nginx"
extraTls:
- hosts:
- \'{{ .Values.external_hostnames.thanos_bucketweb }}\'
secretName: thanos-tls
valuesFrom:
# use values from thanos-minio-user only when using internal MinIO storage
- >-
{{ tuple (dict
"kind" "Secret"
"name" "thanos-minio-user"
"valuesKey" "CONSOLE_ACCESS_KEY"
"targetPath" "objstoreConfig.config.access_key"
"optional" true
)
(.Values.monitoring.thanos.objstoreConfig.config | empty)
| include "set-only-if"
}}
- >-
{{ tuple (dict
"kind" "Secret"
"name" "thanos-minio-user"
"valuesKey" "CONSOLE_SECRET_KEY"
"targetPath" "objstoreConfig.config.secret_key"
"optional" true
)
(.Values.monitoring.thanos.objstoreConfig.config | empty)
| include "set-only-if"
}}
# Use tenant list built by kyverno policy thanos-aggregated-secret to fill auth.basicAuthUsers object
- kind: Secret
name: thanos-secrets
valuesKey: secrets
_postRenderers:
- kustomize:
patches:
- \'{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include "preserve-type" }}\'
# add the current persistence config as a StatefulSet annotation so we know if we need to delete it
- kustomize:
patches:
- target:
kind: StatefulSet
name: thanos-receive
patch: |
- op: add
path: /metadata/annotations/thanos.persistent-config-hash
value: |-
{{ tuple . "units.thanos.helmrelease_spec.values.receive.persistence" | include "interpret" -}}
{{ .Values.units.thanos.helmrelease_spec.values.receive.persistence | toJson | sha256sum | trunc 8 }}
- target:
kind: StatefulSet
name: thanos-storegateway
patch: |
- op: add
path: /metadata/annotations/thanos.persistent-config-hash
value: |-
{{ tuple . "units.thanos.helmrelease_spec.values.storegateway.persistence" | include "interpret" -}}
{{ .Values.units.thanos.helmrelease_spec.values.storegateway.persistence | toJson | sha256sum | trunc 8 }}
- target:
kind: StatefulSet
name: thanos-ruler
patch: |
- op: add
path: /metadata/annotations/thanos.persistent-config-hash
value: |-
{{ tuple . "units.thanos.helmrelease_spec.values.ruler.persistence" | include "interpret" -}}
{{ .Values.units.thanos.helmrelease_spec.values.ruler.persistence | toJson | sha256sum | trunc 8 }}
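# (the same persistence hash is passed as EXPECTED_HASH_* env vars to the
# thanos-statefulsets-cleanup job above, so that job can detect when the persistence
# config of a StatefulSet has changed and the StatefulSet needs to be recreated)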
helm_secret_values:
extraDeploy:
- apiVersion: v1
kind: Secret
metadata:
name: thanos-basic-auth
type: Opaque
data:
username: \'{{ .Values._internal.thanos_user | b64enc }}\'
password: \'{{ .Values._internal.thanos_password | b64enc }}\'
- apiVersion: v1
kind: Secret
metadata:
name: thanos-ruler-queries
namespace: thanos
type: Opaque
stringData:
ruler-queries.yaml: |
- http_config:
basic_auth:
username: "{{ .Values._internal.thanos_user }}"
password: "{{ .Values._internal.thanos_password }}"
tls_config:
insecure_skip_verify: true
static_configs: ["thanos-query-frontend:9090"]
scheme: http
objstoreConfig:
\'{{ .Values._internal.thanos.objstoreConfig | include "preserve-type" }}\'
logging-crd:
info:
description: installs the logging-operator CRDs
maturity: beta
hidden: true
enabled_conditions:
- \'{{ tuple . "logging" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
repo: logging-chart-repository
helmrelease_spec:
releaseName: logging-crd
targetNamespace: kube-logging
chart:
spec:
chart: charts/logging-operator/charts/logging-operator-crds
install:
createNamespace: false
kustomization_spec:
prune: false
logging:
enabled: false
info:
description: Deploys the Fluentbit/Fluentd logging stack from the logging-operator chart, for log scraping and shipping
unit_templates:
- base-deps
depends_on:
logging-crd: true
repo: logging-chart-repository
helmrelease_spec:
targetNamespace: kube-logging
chart:
spec:
chart: charts/logging-operator
values:
logging:
enabled: true
fluentd:
resources:
limits:
memory: 1Gi
requests:
memory: 300M
fluentbit:
customParsers: |
[PARSER]
Name crilog
Format regex
Regex ^(?<time>[^ ]+) (?<stream>stdout|stderr) (?<logtag>[^ ]+) (?<log>.*)$
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L%z
Time_Keep "On"
inputTail:
Parser: crilog
Path_Key: "log_file"
image:
repository: ghcr.io/kube-logging/logging-operator
tag: \'{{ .Values.source_templates | dig "logging-chart-repository" "spec" "ref" "tag" "error, source_templates.logging-chart-repository has no spec.ref.tag" }}\'
securityContext:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
capabilities:
drop: [ "ALL" ]
logging-config:
info:
description: Configures the logging unit to ship logs to Loki
internal: true
enabled_conditions:
- \'{{ tuple . "logging" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
logging: true
#loki: this dependency is handled in a different way, in management.values.yaml and workload-cluster.values.yaml
repo: sylva-core
kustomization_substitute_secrets:
LOKI_USERNAME: \'{{ .Values._internal.loki_user }}\'
LOKI_PASSWORD: \'{{ .Values._internal.loki_password }}\'
kustomization_spec:
path: ./kustomize-units/kube-logging
postBuild:
substitute:
# we must use the external name so that this also works for workload clusters
LOKI_URL: \'{{ .Values.logging.loki_url }}\'
CLUSTER_NAME: \'{{ .Values.cluster.name }}\'
PATH: /var/log/journal
CHUNK_LIMIT_SIZE: 8m
FLUSH_THREAD_COUNT: "8"
FLUSH_INTERVAL: 2s
FLUSH_MODE: interval
wait: true
_components:
- \'{{ tuple "components/host-tailer-rke2" (.Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" ) | include "set-only-if" }}\'
- \'{{ tuple "components/host-tailer-kubeadm" (.Values.cluster.capi_providers.bootstrap_provider | eq "cabpk" ) | include "set-only-if" }}\'
sylva-logging-flows:
info:
description: configures logging flows and outputs to export the platform logs to an external server
maturity: beta
internal: false
unit_templates:
- base-deps
enabled: false
depends_on:
logging: true
repo: sylva-logging-flows
helm_chart_artifact_name: sylva-logging-flows
# flows, clusterflows, outputs and clusteroutputs values are documented in the sylva-logging-flows helm chart https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-logging-flows
helmrelease_spec:
chart:
spec:
chart: .
targetNamespace: kube-logging
values:
flows: \'{{ .Values.logging.flows | include "preserve-type" }}\'
clusterflows: \'{{ .Values.logging.clusterflows | include "preserve-type" }}\'
helm_secret_values:
outputs: \'{{ .Values.logging.outputs | include "preserve-type" }}\'
clusteroutputs: \'{{ .Values.logging.clusteroutputs | include "preserve-type" }}\'
loki-init:
info:
description: sets up Loki certificate
details: it generates the Loki certificate
internal: true
enabled_conditions:
- \'{{ tuple . "loki" | include "unit-enabled" }}\'
unit_templates:
- base-deps
depends_on:
namespace-defs: true
sylva-ca: true
external-secrets-operator: true
eso-secret-stores: true
minio-logging-init: true
repo: sylva-core
kustomization_substitute_secrets:
KEY: \'{{ .Values.external_certificates.loki.key | default "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/loki-init
wait: true
postBuild:
substitute:
SERVICE: loki
SERVICE_DNS: \'{{ .Values.external_hostnames.loki }}\'
CERTIFICATE_NAMESPACE: loki
CERT: \'{{ .Values.external_certificates.loki.cert | default "" | b64enc }}\'
CACERT: \'{{ .Values.external_certificates.cacert | default "" | b64enc }}\'
SECRET_PATH: \'{{ .Values.security.vault.paths.secret }}\'
_components:
- \'{{ ternary "../tls-components/tls-secret" "../tls-components/tls-certificate" (hasKey .Values.external_certificates.loki "cert") }}\'
- "../tls-components/sylva-ca"
loki-credentials-secret:
info:
description: creates a secret containing the tenant\'s Loki credentials
internal: true
unit_templates:
- base-deps
depends_on:
kyverno-policies-ready: true
enabled_conditions:
- \'{{ tuple . "loki" | include "unit-enabled" }}\'
repo: sylva-core
kustomization_substitute_secrets:
PASSWORD: \'{{ .Values._internal.loki_password }}\'
kustomization_spec:
targetNamespace: \'{{ .Release.Namespace }}\'
path: ./kustomize-units/credentials-secret
postBuild:
substitute:
LABEL_NAME: sylva.io/aggregated-secret
LABEL_VALUE: loki
SECRET_NAME: loki-credentials
USERNAME: \'{{ .Values._internal.loki_user }}\'
healthChecks:
# Ensure that loki-aggregated-secret Kyverno ClusterPolicy has produced the secret
- apiVersion: v1
kind: Secret
namespace: sylva-system
name: loki-secrets
healthCheckExprs:
# Check that loki-secrets.data.secrets contains at least one tenant definition,
# considering that the base64-encoded value of \'{"loki":{"tenants":[]}}\' is 33 characters long
- apiVersion: v1
kind: Secret
current: size(data.?secrets.orValue("")) > 33
failed: size(data.?secrets.orValue("")) <= 33
loki:
info:
description: installs Loki log storage
details: installs Loki log storage in simple scalable mode
maturity: beta
unit_templates:
- base-deps
depends_on:
namespace-defs: true
loki-init: true
loki-credentials-secret: true
minio-logging: true
logging-crd: \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
enabled_conditions:
- \'{{ tuple . "logging" | include "unit-enabled" }}\'
annotations:
sylvactl/readyMessage: "Loki can be reached at https://{{ .Values.external_hostnames.loki }} ({{ .Values._internal.display_external_ip_msg }})"
helm_repo_url: oci://ghcr.io/grafana/helm-charts
helmrelease_spec:
driftDetection:
ignore:
- paths:
- /metadata/annotations/prometheus-operator-validated
target:
group: monitoring.coreos.com
kind: PrometheusRule
targetNamespace: loki
chart:
spec:
chart: loki
version: 6.41.1
install:
createNamespace: false
values:
global:
dnsService: \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" | ternary "rke2-coredns-rke2-coredns" "kube-dns" }}\'
loki:
analytics:
reporting_enabled: false
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
server:
grpc_server_max_recv_msg_size: 52428800
grpc_server_max_send_msg_size: 52428800
compactor:
compaction_interval: 1h
retention_enabled: true
retention_delete_delay: 2h
retention_delete_worker_count: 150
delete_request_store: s3
limits_config:
per_stream_rate_limit: 10MB
ingestion_rate_strategy: local
ingestion_rate_mb: 100
ingestion_burst_size_mb: 100
retention_period: 72h
reject_old_samples: false
unordered_writes: true
allow_structured_metadata: false
ingester:
wal:
enabled: true
dir: /data/wal
schemaConfig:
configs:
- from: "2022-01-11"
index:
period: 24h
prefix: loki_index_
object_store: s3
schema: v13
store: tsdb
commonConfig:
replication_factor: 1
storage:
bucketNames:
chunks: "loki-chunks"
ruler: "loki-ruler"
admin: "loki-admin"
s3:
endpoint: "minio.minio-logging.svc.cluster.local:443"
s3ForcePathStyle: true
http_config:
insecure_skip_verify: true
storage_class: REDUCED_REDUNDANCY
test:
enabled: false
lokiCanary:
enabled: false
chunksCache:
allocatedMemory: 4096
resources:
limits:
memory: 5Gi
requests:
memory: 500Mi
resultsCache:
allocatedMemory: 512
resources:
limits:
memory: 600Mi
requests:
memory: 60Mi
monitoring:
selfMonitoring:
enabled: false
grafanaAgent:
installOperator: false
serviceMonitor:
enabled: \'{{ .Values._internal.monitoring.enabled | include "preserve-type" }}\'
relabelings:
# drop loki-gateway endpoints from scraping as basicAuth config for serviceMonitor
# is not supported. See: https://github.com/grafana/loki/issues/14141
- sourceLabels:
- job
regex: "loki/loki-gateway"
action: drop
dashboards:
enabled: \'{{ .Values._internal.monitoring.enabled | include "preserve-type" }}\'
rules:
enabled: \'{{ .Values._internal.monitoring.enabled | include "preserve-type" }}\'
additionalGroups:
- name: additional-loki-rules
rules:
- record: job:loki_request_duration_seconds_bucket:sum_rate
expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job)
- record: job_route:loki_request_duration_seconds_bucket:sum_rate
expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job, route)
- record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate
expr: sum(rate(container_cpu_usage_seconds_total[1m])) by (node, namespace, pod, container)
write:
replicas: \'{{ int .Values._internal.node_count | gt 2 | ternary 1 2 }}\'
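# (the pipe passes node_count as the last argument of gt, so this evaluates to 1 replica
# on single-node clusters and 2 replicas otherwise; read, backend and gateway below use the same expression)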
persistence:
size: 3Gi
autoscaling:
enabled: true
minReplicas: 2
maxReplicas: 6
targetCPUUtilizationPercentage: 40
extraVolumes:
- name: data
emptyDir: {}
- name: loki
emptyDir: {}
extraVolumeMounts:
- name: data
mountPath: /data
- name: loki
mountPath: /loki
read:
replicas: \'{{ int .Values._internal.node_count | gt 2 | ternary 1 2 }}\'
persistence:
size: 3Gi
backend:
replicas: \'{{ int .Values._internal.node_count | gt 2 | ternary 1 2 }}\'
persistence:
size: 3Gi
memberlist:
service:
publishNotReadyAddresses: true
gateway:
nginxConfig:
clientMaxBodySize: 30M
replicas: \'{{ int .Values._internal.node_count | gt 2 | ternary 1 2 }}\'
ingress:
enabled: true
ingressClassName: "nginx"
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: "4096m"
nginx.ingress.kubernetes.io/client-body-buffer-size: 10m
hosts:
- host: \'{{ .Values.external_hostnames.loki }}\'
paths:
- path: /
pathType: Prefix
tls:
- secretName: loki-tls
hosts:
- \'{{ .Values.external_hostnames.loki }}\'
basicAuth:
enabled: true
containerSecurityContext:
seccompProfile:
type: RuntimeDefault
kubectlImage:
repository: alpine/kubectl
tag: 1.34.1
valuesFrom:
- kind: Secret
name: loki-minio-user
valuesKey: CONSOLE_ACCESS_KEY
targetPath: loki.storage.s3.accessKeyId
optional: false
- kind: Secret
name: loki-minio-user
valuesKey: CONSOLE_SECRET_KEY
targetPath: loki.storage.s3.secretAccessKey
optional: false
# Use the tenant list built by the Kyverno policy loki-aggregated-secret; it fills the loki.tenant object
- kind: Secret
name: loki-secrets
valuesKey: secrets
_postRenderers:
- kustomize:
patches:
- \'{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include "preserve-type" }}\'
single-replica-storageclass:
info:
description: Defines a Longhorn storage class with a single replica
internal: true
repo: sylva-core
unit_templates:
- base-deps
depends_on:
longhorn: true
enabled_conditions:
- \'{{ tuple . "longhorn" | include "unit-enabled" }}\'
kustomization_spec:
path: ./kustomize-units/longhorn-storageclass
wait: true
postBuild:
substitute:
CLASS_NAME: single-replica-storageclass
_patches:
- target:
kind: StorageClass
patch: |
kind: _unused_
metadata:
name: _unused_
annotations:
storageclass.kubernetes.io/is-default-class: \'{{ .Values._internal.default_storage_class | eq "single-replica-storageclass" }}\'
parameters:
numberOfReplicas: "1"
two-replicas-storageclass:
info:
description: Defines a Longhorn storage class with two replicas
internal: true
repo: sylva-core
unit_templates:
- base-deps
depends_on:
longhorn: true
enabled_conditions:
- \'{{ tuple . "longhorn" | include "unit-enabled" }}\'
- \'{{ gt (.Values._internal.longhorn_node_count_upper_bound | int) 1 }}\'
kustomization_spec:
path: ./kustomize-units/longhorn-storageclass
wait: true
postBuild:
substitute:
CLASS_NAME: two-replicas-storageclass
_patches:
- target:
kind: StorageClass
patch: |
kind: _unused_
metadata:
name: _unused_
annotations:
storageclass.kubernetes.io/is-default-class: \'{{ .Values._internal.default_storage_class | eq "two-replicas-storageclass" }}\'
parameters:
numberOfReplicas: "2"
sylva-prometheus-rules:
info:
description: installs Prometheus rules using an external Helm chart & a rules Git repo
maturity: beta
unit_templates:
- base-deps
depends_on:
monitoring: true
enabled_conditions:
- \'{{ tuple . "monitoring" | include "unit-enabled" }}\'
repo: sylva-prometheus-rules
helm_chart_artifact_name: sylva-prometheus-rules
helmrelease_spec:
releaseName: sylva-prometheus-rules
targetNamespace: sylva-prometheus-rules
chart:
spec:
chart: .
install:
createNamespace: true
values:
createRules:
allclusters: true
\'{{ .Values.cluster.name }}\': true
optional_rules: \'{{ .Values._internal.monitoring.conditionals | include "preserve-type" }}\'
sylva-thanos-rules:
info:
description: installs Thanos rules using an external Helm chart & a rules Git repo
maturity: beta
unit_templates:
- base-deps
depends_on:
thanos-init: true
enabled_conditions:
- \'{{ tuple . "thanos" | include "unit-enabled" }}\'
- \'{{ .Values.units.thanos.helmrelease_spec.values.ruler.enabled }}\'
repo: sylva-thanos-rules
helm_chart_artifact_name: sylva-thanos-rules
helmrelease_spec:
releaseName: sylva-thanos-rules
targetNamespace: thanos
chart:
spec:
chart: .
sync-openstack-images:
info:
description: Automatically pushes OpenStack images to Glance
details: Pushes OS images to Glance, if needed, and retrieves their UUIDs for use in the cluster unit
unit_templates:
- base-deps
depends_on:
os-images-info: true
annotations:
sylvactl/unitTimeout: \'{{ mul .Values.get_openstack_images_per_image_timeout_minutes (include "generate-os-images" . | fromYaml | dig "os_images" dict | len) }}m\'
enabled_conditions:
- \'{{ eq .Values.cluster.capi_providers.infra_provider "capo" }}\'
repo: sync-openstack-images
helm_chart_artifact_name: sync-openstack-images
helmrelease_spec:
chart:
spec:
chart: .
targetNamespace: \'{{ .Release.Namespace }}\'
install:
createNamespace: false
timeout: 60m
valuesFrom:
# os_images:
- kind: ConfigMap
name: os-images-info # by-product of os-images-info unit
values:
downloader:
insecure_client: \'{{ .Values.oci_registry_insecure }}\'
proxy: \'{{ .Values.mgmt_cluster_state_values.proxies.https_proxy }}\'
no_proxy: \'{{ .Values.mgmt_cluster_state_values.proxies.no_proxy }}\'
extra_ca_certs: \'{{ tuple (.Values.oci_registry_extra_ca_certs | default "") (not .Values.oci_registry_insecure) | include "set-only-if" }}\'
os_image_selectors: >-
{{- tuple . "cluster" | include "interpret" -}}
{{ tuple .Values.cluster | include "find-cluster-image-selectors" | fromJsonArray | include "preserve-type" }}
helm_secret_values:
openstack_clouds_yaml: \'{{ .Values.cluster.capo.clouds_yaml | include "preserve-type" }}\'
descheduler:
info:
description: installs the descheduler
enabled: false
unit_templates:
- base-deps
helm_repo_url: https://kubernetes-sigs.github.io/descheduler/
helmrelease_spec:
releaseName: descheduler
targetNamespace: kube-system
chart:
spec:
chart: descheduler
version: 0.33.0
values:
kind: CronJob
schedule: "*/10 * * * *"
deschedulerPolicy:
profiles:
- name: sylva
pluginConfig:
- name: RemoveDuplicates
- name: LowNodeUtilization
args:
useDeviationThresholds: true
thresholds:
cpu: 20
memory: 20
pods: 20
targetThresholds:
cpu: 60
memory: 60
pods: 50
plugins:
balance:
enabled:
- RemoveDuplicates
- LowNodeUtilization
workload-team-defs:
info:
description: installs the workload-team-defs chart
details: installs the workload-team-defs chart to install workload clusters through CRDs
maturity: experimental
repo: workload-team-defs
unit_templates:
- base-deps
depends_on:
sylva-units-operator: \'{{ tuple . "sylva-units-operator" | include "unit-enabled" }}\'
external-secrets-operator: \'{{ tuple . "sylva-units-operator" | include "unit-enabled" }}\'
enabled_conditions:
- \'{{ tuple . "workload-cluster-operator" | include "unit-enabled" }}\'
helm_chart_artifact_name: workload-team-defs
helmrelease_spec:
chart:
spec:
chart: .
values:
workload_cluster_teams: \'{{ .Values.workload_clusters.teams | include "preserve-type" }}\'
managed_clusters_settings: \'{{ tuple . "mgmt_cluster_state_values" | include "interpret" }}{{ .Values.mgmt_cluster_state_values | include "preserve-type" }}\'
workload_cluster_sylva_source: \'{{ .Values.workload_clusters.sylva_source | include "preserve-type" }}\'
coredns-custom-hosts-import:
enabled: false
info:
description: creates a ConfigMap containing the workload cluster\'s DNS A records for the [CoreDNS hosts plugin](https://coredns.io/plugins/hosts/)
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpoa" }}\'
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/coredns-custom-hosts-import
postBuild:
substitute:
NAMESPACE: \'{{ .Release.Namespace }}\'
CLUSTER_NAME: \'{{ .Values.cluster.name }}\'
CLUSTER_VIRTUAL_IP: \'{{ .Values.cluster_virtual_ip }}\'
CLUSTER_OKD_BASE_DOMAIN: \'{{ .Values.cluster.openshift.baseDomain }}\'
backup-capi-resources:
info:
description: Backs up Cluster API resources
details: Periodically backs up Cluster API resources using clusterctl move
unit_templates:
- base-deps
- \'{{ tuple "backup-s3" (hasKey ((.Values.backup | default dict) | dig "store" dict) "s3") | include "set-only-if" }}\'
- kube-cronjob
enabled_conditions:
- \'{{ .Values.backup | ne nil }}\'
kustomization_spec:
path: ./kustomize-units/backup
postBuild:
substitute:
ADDITIONAL_RESOURCES: ConfigMap/sylva-units-values Secret/sylva-units-secrets ConfigMap/capo-cluster-resources # space-separated kind/name resources to backup alongside capi resources
_components:
- "./components/backup-capi-resources"
- \'{{ tuple "./components/pushgateway" (tuple . "prometheus-pushgateway" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "./components/timestamp" ((.Values.backup | default dict) | dig "timestamped" false) | include "set-only-if" }}\'
_patches:
- target:
kind: CronJob
patch: |
- op: replace
path: /spec/schedule
value: "0 5 * * * "
backup-etcd:
info:
description: Backs up etcd
details: Periodically backs up etcd using etcdctl
unit_templates:
- base-deps
- \'{{ tuple "backup-s3" (hasKey ((.Values.backup | default dict) | dig "store" dict) "s3") | include "set-only-if" }}\'
- kube-cronjob
enabled_conditions:
- \'{{ .Values.backup | ne nil }}\'
kustomization_spec:
path: ./kustomize-units/backup
targetNamespace: kube-system
postBuild:
substitute:
CLUSTER_NAME: \'{{ .Values.cluster.name }}\'
RUNASUSER: "915"
RUNASGROUP: "915"
_components:
- "./components/backup-etcd"
- \'{{ tuple "./components/backup-etcd/cabpk-volumes" (.Values.cluster.capi_providers.bootstrap_provider | eq "cabpk") | include "set-only-if" }}\'
- \'{{ tuple "./components/backup-etcd/cabpr-volumes" (.Values.cluster.capi_providers.bootstrap_provider | eq "cabpr") | include "set-only-if" }}\'
- \'{{ tuple "./components/pushgateway" (tuple . "prometheus-pushgateway" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "./components/timestamp" ((.Values.backup | default dict) | dig "timestamped" false) | include "set-only-if" }}\'
_patches:
- target:
kind: CronJob
patch: |
- op: replace
path: /spec/schedule
value: "0 5 * * * "
rancher-backup-restore-operator-crd:
info:
description: installs the Rancher Backup Restore operator CRDs
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "rancher-backup-restore-operator" | include "unit-enabled" }}\'
helm_repo_url: https://charts.rancher.io
helmrelease_spec:
driftDetection:
mode: warn
chart:
spec:
chart: rancher-backup-crd
version: 106.0.3+up7.0.2
kustomization_spec:
prune: false
rancher-backup-restore-operator-init:
info:
description: configures the Rancher Backup Restore operator, which permits backing up and restoring Rancher
internal: true
unit_templates:
- base-deps
enabled_conditions:
- \'{{ tuple . "rancher-backup-restore-operator" | include "unit-enabled" }}\'
depends_on:
rancher-backup-restore-operator-crd: true
repo: sylva-core
kustomization_substitute_secrets:
S3_ACCESS_KEY: \'{{ (.Values.backup | default dict) | dig "store" "s3" "access_key" "" | b64enc }}\'
S3_SECRET_KEY: \'{{ (.Values.backup | default dict) | dig "store" "s3" "secret_key" "" | b64enc }}\'
kustomization_spec:
path: ./kustomize-units/rancher-backup-restore-operator-config
_components:
- \'{{ tuple "./components/backup-to-s3" (hasKey ((.Values.backup | default dict) | dig "store" dict) "s3") | include "set-only-if" }}\'
postBuild:
substitute:
SCHEDULE: "0 5 * * *"
rancher-backup-restore-operator:
info:
description: installs the Rancher Backup Restore operator, which permits backing up and restoring Rancher
unit_templates:
- base-deps
depends_on:
rancher-backup-restore-operator-crd: true
rancher-backup-restore-operator-init: true
\'{{ .Values._internal.default_storage_class_unit }}\': \'{{ not (hasKey ((.Values.backup | default dict) | dig "store" dict) "s3") }}\'
helm_repo_url: https://charts.rancher.io
helmrelease_spec:
driftDetection:
mode: warn
chart:
spec:
chart: rancher-backup
version: 106.0.3+up7.0.2
targetNamespace: cattle-resources-system
values:
persistence:
enabled: \'{{ (not (hasKey ((.Values.backup | default dict) | dig "store" dict) "s3")) | include "preserve-type" }}\'
storageClass: \'{{ .Values._internal.default_storage_class }}\'
s3: >-
{{
tuple (dict
"enabled" true
"credentialSecretName" "backup-store-s3-rancher"
"credentialSecretNamespace" "cattle-resources-system"
"bucketName" ((.Values.backup | default dict) | dig "store" "s3" "bucket" "")
"region" ((.Values.backup | default dict) | dig "store" "s3" "region" "")
"endpoint" ((.Values.backup | default dict) | dig "store" "s3" "host" "")
"endpointCA" ((.Values.backup | default dict) | dig "store" "s3" "cert" "")
)
(hasKey ((.Values.backup | default dict) | dig "store" dict) "s3")
| include "set-only-if"
}}
firewall-common:
info:
description: >
configures global network policies applying
on the management cluster and on workload clusters
internal: true
unit_templates:
- base-deps
enabled: false
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/firewall/common
wait: true
postBuild:
substitute:
# _internal.cluster_virtual_ip is defined when not using capo
# Otherwise, CLUSTER_VIRTUAL_IP is substituted from the capo-cluster-resources configmap when using capo
CLUSTER_VIRTUAL_IP: \'{{ .Values._internal.cluster_virtual_ip | include "set-if-defined" }}\'
ALLOWED_PUBLIC_SUBNETS_TO_GUI: \'{{ .Values.security.firewall.allowed_public_subnets_for_gui | default .Values.security.firewall.allowed_public_subnets | toJson }}\'
ALLOWED_PUBLIC_SUBNETS_TO_KUBEAPI: \'{{ .Values.security.firewall.allowed_public_subnets_for_kubeapi | default .Values.security.firewall.allowed_public_subnets | toJson }}\'
ALLOWED_PUBLIC_SUBNETS_TO_SSH: \'{{ .Values.security.firewall.allowed_public_subnets_for_ssh | default .Values.security.firewall.allowed_public_subnets | toJson }}\'
NTP_SERVERS: \'{{ .Values.ntp.servers | toJson }}\'
CLUSTER_PODS_CIDR: \'{{ .Values.cluster.cluster_pods_cidrs | toJson }}\'
COREDNS_SELECTOR: \'{{ .Values._internal.coredns_selector }}\'
substituteFrom:
- kind: ConfigMap
name: capo-cluster-resources # byproduct of capo-cluster-resources units
optional: \'{{ not (.Values.cluster.capi_providers.infra_provider | eq "capo") | include "as-bool" }}\'
- kind: ConfigMap
name: management-cluster-addresses # generated by \'create-global-network-set-for-(capo|capm3)-cluster-nodes\' Kyverno ClusterPolicy
- kind: ConfigMap
name: cluster-provisioning-addresses # generated by create-global-network-set-for-capm3-cluster-nodes Kyverno ClusterPolicy
optional: \'{{ not (and (.Values.cluster.capi_providers.infra_provider | eq "capm3") .Values.cluster.capm3.provisioning_pool_name) | include "as-bool" }}\'
_components:
- \'{{ tuple "components/rke2" (.Values.cluster.capi_providers.bootstrap_provider | eq "cabpr") | include "set-only-if" }}\'
firewall-management-cluster:
info:
description: >
configures global network policies applying
only on the management cluster
internal: true
unit_templates:
- base-deps
enabled: false
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/firewall/management-cluster
wait: true
postBuild:
substitute:
# _internal.cluster_virtual_ip is defined when not using capo
# Otherwise, CLUSTER_VIRTUAL_IP is substituted from the capo-cluster-resources configmap when using capo
CLUSTER_VIRTUAL_IP: \'{{ .Values._internal.cluster_virtual_ip | include "set-if-defined" }}\'
COREDNS_SELECTOR: \'{{ .Values._internal.coredns_selector }}\'
DEFAULT_POLICY: \'{{ .Values.security.firewall.default_policy }}\'
substituteFrom:
- kind: ConfigMap
name: capo-cluster-resources
optional: \'{{ not (.Values.cluster.capi_providers.infra_provider | eq "capo") | include "as-bool" }}\'
_components:
- \'{{ tuple "components/neuvector" (tuple . "neuvector" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/monitoring" (tuple . "monitoring" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/logging" (tuple . "logging" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/metal3" (tuple . "metal3" | include "unit-enabled") | include "set-only-if" }}\'
- \'{{ tuple "components/rke2" (.Values.cluster.capi_providers.bootstrap_provider | eq "cabpr") | include "set-only-if" }}\'
firewall-workload-cluster:
info:
description: >
configures global network policies applying
only on workload clusters
internal: true
unit_templates:
- base-deps
enabled: false
repo: sylva-core
kustomization_spec:
path: ./kustomize-units/firewall/workload-cluster
wait: true
postBuild:
substitute:
# _internal.cluster_virtual_ip is defined when not using capo
# Otherwise, CLUSTER_VIRTUAL_IP is substituted from the capo-cluster-resources configmap when using capo
CLUSTER_VIRTUAL_IP: \'{{ .Values._internal.cluster_virtual_ip | include "set-if-defined" }}\'
DEFAULT_POLICY: \'{{ .Values.security.firewall.default_policy }}\'
substituteFrom:
- kind: ConfigMap
name: capo-cluster-resources
optional: \'{{ not (.Values.cluster.capi_providers.infra_provider | eq "capo") | include "as-bool" }}\'
# only on the management cluster with capo enabled
firewall-kyverno-capo:
info:
description: >
configures a Kyverno policy to create global network sets
with the IP addresses assigned to the nodes of OpenStack workload clusters
internal: true
unit_templates:
- base-deps
enabled: false
repo: sylva-core
depends_on:
capo: true
kustomization_spec:
path: ./kustomize-units/firewall/kyverno-capo
# if the deployment is not capo, wait for the creation of the kyverno resources
# else wait not only for the creation of the kyverno resources but also for
# the production of the configmap containing the IP addresses of the management cluster
wait: true
# only on the management cluster with capm3 enabled
firewall-kyverno-capm3:
info:
description: >
configures a Kyverno policy to create global network sets
with the IP addresses assigned to the nodes of baremetal workload clusters
internal: true
unit_templates:
- base-deps
enabled: false
repo: sylva-core
depends_on:
capm3: true
kustomization_spec:
path: ./kustomize-units/firewall/kyverno-capm3
# if the deployment is not capm3, wait for the creation of the kyverno resources
# else wait not only for the creation of the kyverno resources but also for
# the production of the configmap containing the IP addresses of the management cluster
wait: true
##### stuff related to the \'cluster\' unit #####
#
# all these values under \'cluster\' are passed as values to sylva-capi-cluster chart
cluster:
name: management-cluster
# checks whether the cluster name starts with a number; set to false to bypass this check
check_cluster_name: true
# can be set to true to do an RKE2 deployment disconnected from the Internet:
air_gapped: false
# CIS profile to be used. Currently supported only for RKE2 clusters. "cis-1.6" for k8s prior to 1.25, "cis-1.23" for 1.25+
cis_profile: cis
# for now, the choice below needs to be made
# consistently with the choice of a matching kustomization path
# for the \'cluster\' unit
# e.g. you can use ./management-cluster-def/rke2-capd
capi_providers:
infra_provider: capd # capd, capo, capm3, capv or capone
bootstrap_provider: cabpk # cabpr (RKE2) or cabpk (kubeadm)
# kubernetes version to be used
k8s_version: \'{{ .Values._internal.k8s_version_map | dig .Values.k8s_version_short "" | required (printf "no k8s version defined for %s" .Values.k8s_version_short) }}\'
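# (k8s_version is looked up in _internal.k8s_version_map using k8s_version_short, e.g. "1.32";
# rendering fails with an explicit error if no mapping exists for that short version)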
# kube_vip version to be used for kubeadm deployments
images:
kube_vip:
repository: ghcr.io/kube-vip/kube-vip
tag: "v1.0.0"
# Number of control-plane nodes
control_plane_replicas: 3
kubeapiserver_extra_args:
feature-gates: \'{{ tuple "ValidatingAdmissionPolicy=true" (tuple "<1.32.0" .Values._internal.k8s_version | include "k8s-version-match") | include "set-only-if" }}\'
runtime-config: "admissionregistration.k8s.io/v1beta1"
kubelet_extra_args:
max-pods: \'{{ .Values.cluster.capi_providers.infra_provider | eq "capm3" | ternary "500" "210" }}\'
capo:
# flavor_name: m1.large # Openstack flavor name
# image_key: # key of an image in os_images or sylva_diskimagebuilder_images
# image_name: # (deprecated, please use image_key instead) - OpenStack image name (one of image_key and image_name must be set, but not both)
# ssh_key_name: # OpenStack VM SSH key
# network_id: # OpenStack network used for nodes and VIP port
# rootVolume: {} # Leave this parameter empty if you don\'t intend to use a root volume
# # otherwise, provide following values
# diskSize: 20 # Size of the VMs root disk
# volumeType: \'__DEFAULT__\' # Type of volume to be created
# #control_plane_az: # list of OpenStack availability zones to deploy control plane nodes to, otherwise all would be candidates
# clouds_yaml: # (this is a dict, not a YAML string)
# clouds:
# capo_cloud:
# auth:
# auth_url: # replace me
# user_domain_name: # replace me
# project_domain_name: # replace me
# project_name: # replace me
# username: # replace me
# password: # replace me
# region_name: # replace me
# verify: # e.g. false
# cacert: # cert used to validate CA of OpenStack APIs
# tag set for OpenStack resources in management cluster:
resources_tag: >-
{{- if .Values.cluster.capi_providers.infra_provider | eq "capo" -}}
sylva-{{ .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.auth.username }}
{{- end -}}
control_plane:
capo:
security_group_names:
- capo-{{ .Values.cluster.name }}-security-group-ctrl-plane-{{ .Values.cluster.capo.resources_tag }}
- capo-{{ .Values.cluster.name }}-security-group-common-{{ .Values.cluster.capo.resources_tag }}
additional_commands: >-
{{- tuple (dict
"pre_bootstrap_commands" (list
"groupadd -r -g 915 etcd"
"useradd -r -s /sbin/nologin -M -u 915 -g 915 etcd"
)
)
(tuple . "backup-etcd" | include "unit-enabled")
| include "set-only-if"
}}
machine_deployments: {}
machine_deployment_default:
machine_deployment_spec:
strategy:
rollingUpdate:
maxUnavailable: \'{{ .Values.cluster.capi_providers.infra_provider | eq "capm3" | ternary 1 0 | include "preserve-type" }}\'
# use maxSurge 0 for baremetal deployments
maxSurge: \'{{ .Values.cluster.capi_providers.infra_provider | eq "capm3" | ternary 0 1 | include "preserve-type" }}\'
capo:
security_group_names:
- capo-{{ .Values.cluster.name }}-security-group-workers-{{ .Values.cluster.capo.resources_tag }}
- capo-{{ .Values.cluster.name }}-security-group-common-{{ .Values.cluster.capo.resources_tag }}
cluster_virtual_ip: \'{{ .Values.cluster_virtual_ip }}\'
cluster_public_ip: \'{{ tuple (.Values.openstack.floating_ip) (not (eq .Values.openstack.floating_ip "")) | include "set-only-if" }}\'
cluster_services_cidrs:
- 100.73.0.0/16
cluster_pods_cidrs:
- 100.72.0.0/16
cni:
calico:
helm_values: \'{{ .Values.calico_helm_values | include "preserve-type" }}\'
coredns:
helm_values:
tolerations:
- key: "node.cloudprovider.kubernetes.io/uninitialized"
effect: "NoSchedule"
value: "true"
global:
clusterDNS: \'{{ .Values._internal.coredns.clusterDNS }}\'
clusterDomain: \'{{ .Values._internal.coredns.clusterDomain }}\'
serviceCIDR: \'{{ .Values._internal.coredns.serviceCIDR }}\'
metallb_helm_values: \'{{ omit .Values.metallb_helm_values "prometheus" | include "preserve-type" }}\'
ntp: \'{{ .Values.ntp | include "preserve-type" }}\'
proxies:
http_proxy: \'{{ .Values.proxies.http_proxy }}\'
https_proxy: \'{{ .Values.proxies.https_proxy }}\'
no_proxy: \'{{ include "sylva-units.no_proxy" (tuple .) }}\'
registry_mirrors: \'{{ .Values.registry_mirrors | include "preserve-type" }}\'
helm_oci_url: # for an OCI-based deployment this is overridden in use-oci-artifacts.values.yaml
metallb: ""
metallb-resources: ""
calico-crd: ""
calico: ""
coredns: ""
helm_versions:
metallb: \'{{ .Values.units.metallb.helmrelease_spec.chart.spec.version }}\'
metallb-resources: \'{{ .Values.source_templates | dig "metallb-resources" "spec" "ref" "tag" "" }}\'
calico-crd: \'{{ tuple . "calico-crd" (index .Values.units "calico-crd") | include "get-helm-version" }}\'
calico: \'{{ tuple . "calico" .Values.units.calico | include "get-helm-version" }}\'
coredns: \'{{ .Values.units.coredns.helmrelease_spec.chart.spec.version }}\'
helm_extra_ca_certs: # for an OCI-based deployment this is overridden in use-oci-artifacts.values.yaml
metallb: ""
metallb-resources: ""
calico: ""
coredns: ""
capone: {}
# public_network: ""
# master_template: ""
# worker_template: ""
# images: []
# templates: []
# ONE_XMLRPC: ""
# ONE_AUTH: ""
capv:
# image_name: # vSphere image name
# username: ""
# password: ""
# dataCenter: ""
# server: ""
# dataStore: ""
# tlsThumbprint: ""
# folder: ""
# resourcePool: ""
# storagePolicyName: ""
# networks:
# default:
# networkName: ""
# ssh_key: \'\'
numCPUs: 4
capm3:
machine_image_checksum_type: sha256
networks:
primary:
interface: ""
image_provisioning_host: \'{{ .Values.display_external_ip }}\'
image_provisioning_scheme: "https"
default_network_settings:
mtu: 1500 # set the default network interfaces MTU size
enable_longhorn: \'{{ tuple . "longhorn" | include "unit-enabled" | include "as-bool" }}\'
openshift:
baseDomain: \'{{ .Values.cluster_domain }}\'
version: 4.19.0-okd-scos.ec.1
sshAuthorizedKey: ""
pullSecret: ""
additionalEnabledCapabilities:
- marketplace
- NodeTuning
- Storage
timeouts:
cluster_delete_hook_job_timeout: 300
default_node_class: generic
node_classes: {}
# generic:
# kernel_cmdline:
# hugepages:
# enabled: false
# 2M_percentage_total: ""
# 1G_percentage_total: ""
# default_size: 2M
# extra_options: ""
# kubelet_extra_args: {}
# kubelet_config_file_options: {}
# nodeTaints: {}
# nodeLabels: {}
# nodeAnnotations: {}
# additional_commands:
# pre_bootstrap_commands: []
# post_bootstrap_commands: []
### !!! The Values.cluster.metallb should not be user provided. Use Values.metallb instead !!!
metallb: >-
{{- $cluster_metallb_resources := dict }}
{{- tuple $cluster_metallb_resources .Values.metallb .Values._internal.metallb | include "merge-append" }}
{{ $cluster_metallb_resources | include "preserve-type" }}
rke2:
gzipUserData: \'{{ .Values.cluster.capi_providers.infra_provider | eq "capo" | include "preserve-type" }}\'
# annotations:
# cluster:
# cluster-maxunavailable.sylva.org/enabled: true
cluster_machines_ready:
wait_timeout: 600s
capd_docker_host: unix:///var/run/docker.sock
openstack:
# external_network_id: # Can be provided if a FIP is needed in order to reach the management cluster VIP
floating_ip: "" # will typically be set by capo-cluster-resources
storageClass:
name: cinder-csi # name of the storageClass to be created
#type: xxx # please provide the cinder volume type, e.g. \'ceph_sas\' (must exist in OpenStack)
control_plane_affinity_policy: soft-anti-affinity
worker_affinity_policy: soft-anti-affinity
oci_registry_insecure: false
metal3: {}
# external_bootstrap_ip:
# bootstrap_ip:
opennebula: {}
vsphere:
vsphere-cpi:
vsphere_conf:
# Global properties in this section will be used for all specified vCenters unless overridden in the VirtualCenter section.
global:
port: 443
# set insecure-flag to true if the vCenter uses a self-signed cert
insecureFlag: true
# settings for using k8s secret
secretName: vsphere-cloud-secret
secretNamespace: kube-system
# vcenter section
vcenter:
\'{{ .Values.cluster.capv.server }}\':
server: \'{{ .Values.cluster.capv.server }}\'
datacenters:
- \'{{ .Values.cluster.capv.dataCenter }}\'
cluster_virtual_ip: \'\'
# Admin password that will be configured by default on various units # FIXME, only used for SSO today see https://gitlab.com/sylva-projects/sylva-core/-/issues/503
# [WARNING] This value cannot be overridden in production environments (env_type: prod)
admin_password: \'{{ .Values._internal.default_password }}\'
flux:
kustomize:
concurrent: 10
log_level: info
features_gates: {}
helm:
concurrent: 10
features_gates:
OOMWatch: true
DetectDrift: true
CorrectDrift: true
log_level: debug
source:
concurrent: 10
log_level: info
features_gates: {}
flux_webui:
admin_user: admin
# values for the node creation and node drain timeouts, in minutes
sylvactl_timing_hints:
node_create_timeout: >-
{{ ternary 30 15 (eq .Values.cluster.capi_providers.infra_provider "capm3") | include "preserve-type" }}
node_drain_timeout: >-
{{ ternary 30 15 (eq .Values.cluster.capi_providers.infra_provider "capm3") | include "preserve-type" }}
display_external_ip: \'{{ .Values.openstack.floating_ip | eq "" | ternary .Values.cluster_virtual_ip .Values.openstack.floating_ip }}\'
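# (display_external_ip uses openstack.floating_ip when it is set, and falls back to cluster_virtual_ip when it is empty)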
cluster_domain: sylva
external_hostnames:
rancher: \'rancher.{{ .Values.cluster_domain }}\'
vault: \'vault.{{ .Values.cluster_domain }}\'
keycloak: \'keycloak.{{ .Values.cluster_domain }}\'
flux: \'flux.{{ .Values.cluster_domain }}\'
neuvector: \'neuvector.{{ .Values.cluster_domain }}\'
harbor: \'harbor.{{ .Values.cluster_domain }}\'
goldpinger: \'goldpinger.{{ .Values.cluster_domain }}\'
os_image_server: \'\'
gitea: \'gitea.{{ .Values.cluster_domain }}\'
kunai: \'kunai.{{ .Values.cluster_domain }}\'
minio_operator_console: \'minio-operator-console.{{ .Values.cluster_domain }}\'
minio_monitoring: \'minio-monitoring.{{ .Values.cluster_domain }}\'
minio_monitoring_console: \'minio-monitoring-console.{{ .Values.cluster_domain }}\'
minio_logging: \'minio-logging.{{ .Values.cluster_domain }}\'
minio_logging_console: \'minio-logging-console.{{ .Values.cluster_domain }}\'
thanos: \'thanos.{{ .Values.cluster_domain }}\'
thanos_storegateway: \'thanos-storegateway.{{ .Values.cluster_domain }}\'
thanos_receive: \'thanos-receive.{{ .Values.cluster_domain }}\'
thanos_query: \'thanos-query.{{ .Values.cluster_domain }}\'
thanos_bucketweb: \'thanos-bucketweb.{{ .Values.cluster_domain }}\'
loki: \'loki.{{ .Values.cluster_domain }}\'
grafana: \'grafana.{{ .Values.cluster_domain }}\'
kubevirt_manager: \'kubevirt-manager.{{ .Values.cluster_domain }}\'
# openshift_assisted_service can be overwritten by openshift.assisted.serviceHostname
openshift_assisted_service: \'openshift-assisted-service.{{ .Values.cluster_domain }}\'
# openshift_assisted_images can be overwritten by openshift.assisted.imageHostname
openshift_assisted_images: \'openshift-assisted-images.{{ .Values.cluster_domain }}\'
external_certificates:
rancher: {}
vault: {}
keycloak: {}
flux: {}
neuvector: {}
harbor: {}
goldpinger: {}
os_image_server: {}
gitea: {}
minio_operator: {}
minio_monitoring: {}
minio_logging: {}
thanos: {}
loki: {}
grafana: {}
kubevirt_manager: {}
kunai: {}
audit_log:
level: 0
keycloak: {}
# CIS benchmark scans are only supported for RKE2 so far, e.g. rke2-cis-1.23-profile-hardened
cis_benchmark_scan_profile: \'{{ eq .Values.cluster.capi_providers.bootstrap_provider "cabpr" | ternary "rke2-cis-1.9-profile" "no-scan-profile-defined-for-kubeadm-cluster" }}\'
# os_images: the images that should be served and where they should be downloaded from
# if empty, default values are used
os_images: {}
# to configure the SR-IOV VFs on the supported NICs of cluster nodes
sriov:
node_policies: {}
# mypolicy1:
# nodeSelector: {} # <<< lets you further limit the SR-IOV capable nodes on which the VFs have to be created in a certain config; if not set it applies to all SR-IOV nodes
# resourceName: ""
# numVfs: ""
# deviceType: "" # supported values: "netdevice" or "vfio-pci"
# nicSelector:
# deviceID: ""
# vendor: ""
# pfNames: []
# rootDevices: []
# add ceph cluster details
ceph:
cephfs_csi:
clusterID: ""
fs_name: ""
adminID: ""
adminKey: ""
monitors_ips: []
# add your proxy settings if required
proxies:
https_proxy: ""
http_proxy: ""
no_proxy: "" # you can also use no_proxy_additional, see below
# you can disable default values for no_proxy (localhost,.svc,.cluster.local.,.cluster.local,.sylva)
# Ex: localhost: false
no_proxy_additional:
10.0.0.0/8: true
192.168.0.0/16: true
172.16.0.0/12: true
localhost: true
127.0.0.1: true
.svc: true
\'{{ printf ".%s" .Values.cluster_domain }}\': true
.cluster.local.: true
.cluster.local: true
# configure containerd registry mirrors following https://github.com/containerd/containerd/blob/main/docs/hosts.md
registry_mirrors:
default_settings: # <<< These settings will apply to all configured mirrors
capabilities: ["pull", "resolve"]
# skip_verify: true
# override_path: true
# hosts_config:
# docker.io:
# - mirror_url: http://your.mirror/docker
# registry_settings: # <<< Host settings can be used to override default_settings
# skip_verify: false
# is_default_mirror: true # <<< The server configuration will be the same as the host. In case of failure, the upstream registry won\'t be used, keeping the environment controlled.
# registry.k8s.io:
# - mirror_url: ...
# _default:
# - mirror_url: ...
# deploy emulated baremetal nodes in bootstrap cluster
libvirt_metal:
image: registry.gitlab.com/sylva-projects/sylva-elements/container-images/libvirt-metal:{{ .Values.source_templates | dig "libvirt-metal" "spec" "ref" "tag" "_undefined_"}}
nodes: {}
#management-cp:
# memGB: 12
# numCPUs: 6
#
#workload-cp:
# memGB: 4
# numCPUs: 2
#
#workload-md:
# memGB: 2
# numCPUs: 2
# set the type of environment between 3 possible values: dev, ci and prod
env_type: prod
# set NTP servers by IP or FQDN and enable their usage for control plane nodes
ntp:
enabled: false
servers:
- 1.2.3.4
- europe.pool.ntp.org
# These two sylva_core_oci_registry/sylva_base_oci_registry values govern which OCI repos are used
#
# This matters for:
# 1) OCI deployments
# 2) for non-OCI deployments for retrieving artifacts such as metal3 OS images
#
# For (1), sylva_base_oci_registry is automatically derived from the OCI repo used for the sylva-units HelmRelease.
# For (2) sylva_base_oci_registry can be customized, to use an OCI registry other than registry.gitlab.com/sylva-projects
#
# There should in general be no need to override sylva_core_oci_registry, which is derived
# from sylva_base_oci_registry.
#
# sylva_base_oci_registry defaults to oci://registry.gitlab.com/sylva-projects
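#
# Illustrative example only (the hostname is a placeholder, not a Sylva default): to pull
# Sylva artifacts from a local mirror, it should be enough to override the base registry:
#
# sylva_base_oci_registry: oci://my-registry.example.com/sylva-projects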
sylva_base_oci_registry:
\'{{
regexReplaceAll
"/sylva-core/?$"
(lookup "source.toolkit.fluxcd.io/v1" "HelmRepository" .Release.Namespace "sylva-units" | dig "spec" "url" "")
""
| default "oci://registry.gitlab.com/sylva-projects"
}}\'
sylva_core_oci_registry: \'{{ .Values.sylva_base_oci_registry }}/sylva-core\'
os_images_oci_registries:
sylva:
url: \'{{ .Values.sylva_base_oci_registry }}/sylva-elements/diskimage-builder\'
tag: \'{{ .Values.sylva_diskimagebuilder_version }}\'
cosign_publickey: \'{{ .Values.security.os_images.cosign_public_key }}\'
enabled: true # can be set to false to disable all the images under sylva_diskimagebuilder_images that have \'os_images_oci_registry: sylva\'
sylva-kiwi-imagebuilder:
url: \'{{ .Values.sylva_base_oci_registry }}/sylva-elements/kiwi-imagebuilder\'
tag: \'{{ .Values.sylva_kiwi_imagebuilder_version }}\'
cosign_publickey: \'{{ .Values.security.kiwi_image_builder_cosign_public_key }}\'
k8s_version_short: "1.32"
# Renovate Bot needs additional information to detect sylva diskimage-builder version
# renovate: depName=sylva-projects/sylva-elements/diskimage-builder
sylva_diskimagebuilder_version: 0.6.2
# renovate: depName=sylva-projects/sylva-elements/kiwi-imagebuilder
sylva_kiwi_imagebuilder_version: 0.3.1
# \'sylva_diskimagebuilder_images\' determines which images will be available for CAPO and CAPM3
# via \'os_image_selector\' or \'image_keys\'
#
# by default it lists OCI artifacts found at gitlab.com/sylva-projects/sylva-elements/diskimage-builder
# or a mirror repo if os_images_oci_registries.sylva is overridden
#
sylva_diskimagebuilder_images:
ubuntu-noble-hardened-rke2-1-32:
os_images_oci_registry: sylva
ubuntu-noble-hardened-rke2-1-31:
os_images_oci_registry: sylva
ubuntu-noble-hardened-rke2-1-30:
os_images_oci_registry: sylva
ubuntu-noble-plain-rke2-1-32:
os_images_oci_registry: sylva
ubuntu-noble-plain-rke2-1-31:
os_images_oci_registry: sylva
ubuntu-noble-plain-rke2-1-30:
os_images_oci_registry: sylva
ubuntu-noble-plain-kubeadm-1-32:
os_images_oci_registry: sylva
ubuntu-noble-plain-kubeadm-1-31:
os_images_oci_registry: sylva
ubuntu-noble-plain-kubeadm-1-30:
os_images_oci_registry: sylva
opensuse-15-6-plain-rke2-1-32:
os_images_oci_registry: sylva
opensuse-15-6-plain-rke2-1-31:
os_images_oci_registry: sylva
opensuse-15-6-plain-rke2-1-30:
os_images_oci_registry: sylva
opensuse-15-6-hardened-rke2-1-32:
os_images_oci_registry: sylva
opensuse-15-6-hardened-rke2-1-31:
os_images_oci_registry: sylva
opensuse-15-6-hardened-rke2-1-30:
os_images_oci_registry: sylva
opensuse-15-6-plain-kubeadm-1-32:
os_images_oci_registry: sylva
opensuse-15-6-plain-kubeadm-1-31:
os_images_oci_registry: sylva
opensuse-15-6-plain-kubeadm-1-30:
os_images_oci_registry: sylva
ubuntu-noble-plain-ck8s-1-32:
os_images_oci_registry: sylva
ubuntu-noble-plain-ck8s-1-31:
os_images_oci_registry: sylva
leapmicro-6-0-plain-rke2-1-30:
os_images_oci_registry: sylva-kiwi-imagebuilder
leapmicro-6-0-plain-rke2-1-31:
os_images_oci_registry: sylva-kiwi-imagebuilder
leapmicro-6-0-plain-rke2-1-32:
os_images_oci_registry: sylva-kiwi-imagebuilder
# this dictionary can be extended to let os-image-server serve additional OS images
# if the mgmt cluster uses capm3, the os-image-server by default will serve the OS images used by the mgmt cluster
# os_image_server_additional_selectors is used to serve additional OS images for workload clusters
#
# os_image_server_additional_selectors:
# k28-for-my-workload-clusters:
# os: opensuse
# os-version: 15.6
# k8s-version: v1.28.*
# k8s-flavor: kubeadm
os_image_server_additional_selectors: {}
# this dict can be enriched at deployment time to feed additional information
# into the sylva-units-status ConfigMap
additional_sylva_units_status_info: {}
# os_images_default_download_storage_space defines the default size of volumes
# used by os-image-server to download each OS image OCI artifacts
# and uncompress the image stored inside
#
# it needs to cover the size of the OCI artifact *plus* the size of the uncompressed image
#
# this needs to be set to ~25Gi for hardened images
# (until https://gitlab.com/sylva-projects/sylva-elements/diskimage-builder/-/issues/57 is addressed)
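# as a rough illustration (figures are indicative only): a ~5Gi compressed OCI artifact whose
# image uncompresses to ~20Gi requires at least 25Gi of download storage space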
os_images_default_download_storage_space: 30Gi
get_openstack_images_per_image_timeout_minutes: 30
security:
vault:
# specify vault path names for secrets store and kubernetes authentication (\'secret\' and \'kubernetes\' by default)
paths:
secret: secret
k8s: kubernetes
# External Vault values (do not use for the moment)
# external_vault_url: https://myvault.mydomain:8200
# external_vault_ca: |
# -----BEGIN CERTIFICATE-----
# MIIC6jCCAdKgAwIBAgIBADANBgkqh....
external_x509_issuer: {}
# issuer_type: acme or vault
# vault_token: authentication token for Vault
# server: e.g. https://vault.external-domain:8200
# vault_path: e.g. /pki/sylva, only when using vault issuer
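#
# illustrative sketch of an external Vault issuer built only from the fields listed above
# (all values are placeholders):
#
# external_x509_issuer:
#   issuer_type: vault
#   server: https://vault.external-domain:8200
#   vault_path: /pki/sylva
#   vault_token: <vault-authentication-token>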
default_cosign_public_key: | # the default public key in PEM format to be used to verify cosign signatures
-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEcuuLszwy0u7y394KY6GW1BgjfaU6
aK7e95MV+ikZnRfR5EHtqEk1tScNhMqQJMpuFs3QH1TVBr6TIWVLx5cUtg==
-----END PUBLIC KEY-----
kiwi_image_builder_cosign_public_key: |
-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf/H8Nl7IOJSwQ3fonpdoKqZWJsgt
+zI1cV172PHY/C/5YhVnMJkgcvOzSkdVxjAjOBoOLReYqF6O7GKRY6dOFw==
-----END PUBLIC KEY-----
upstream_images_signature:
verify: false # verify the signature of the upstream images
policy_action: Audit # action taken by Kyverno when validation fails: report (set Audit) or block the pod (set Enforce)
repository: \'{{ .Values.sylva_base_oci_registry }}/sylva-core/signatures\' # the repository storing the signatures
# the public key that kyverno must use to verify the signatures of the trusted upstream images
cosign_public_key: \'{{ .Values.security.default_cosign_public_key }}\'
images_list: # the list of trusted images to verify, wildcards are allowed, e.g.
- quay.io/keycloak/keycloak*
- hashicorp/vault*
calico_wireguard_enabled: false # disable wireguard by default
neuvector_scanning_enabled: false # disable Neuvector scanning capability by default
os_images:
skip_signing_check: false
cosign_public_key: \'{{ .Values.security.default_cosign_public_key }}\'
oci_artifacts:
skip_signing_check: false
cosign_public_key: | # the OCI public key is not the default public key used to verify sylva elements, e.g. os_images
-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEN6LNycNA/OB8/dtqTPZcPDuLnxW
hR0Rskmno7Lx1WqBl2ylN/sfkLEAPfCkizceHiu/fw8lnsPq9uSGlAICeQ==
-----END PUBLIC KEY-----
sbom_operator:
targets:
- configmap # list of targets (i.e. places where the SBOM is uploaded). Supported targets are configmap, oci and dtrack
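# for example (illustrative only, additional per-target settings are not shown here),
# to publish the SBOM to all supported targets:
# targets:
#   - configmap
#   - oci
#   - dtrack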
trivy_operator:
insecure_registries: {}
mirrors: {}
firewall:
enabled: false
# By default, allow external ingress traffic from all sources
allowed_public_subnets:
- 0.0.0.0/0
# Specify the following values to use different subnets for each type of flow
# allowed_public_subnets_for_gui:
# allowed_public_subnets_for_kubeapi:
# allowed_public_subnets_for_ssh:
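#
# illustrative example using documentation-range placeholders (not defaults):
# allowed_public_subnets_for_gui:
#   - 203.0.113.0/24
# allowed_public_subnets_for_kubeapi:
#   - 198.51.100.0/24
# allowed_public_subnets_for_ssh:
#   - 192.0.2.0/24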
# We drop packets by default
# This value can be set to Allow for troubleshooting
default_policy: Allow
secret_manager:
variant: vault
monitoring:
platform_tag: Sylva
thanos:
# receive_url:
# - for mgmt cluster, the local thanos k8s service is used (the URL below)
# - for workload clusters, this is overridden via mgmt_cluster_state_values.monitoring.thanos.receive_url
# to point to the mgmt cluster Thanos receive ingress
receive_url: http://thanos-receive.thanos.svc.cluster.local:19291/api/v1/receive
receive:
persistence:
size: \'{{ has .Values.env_type (list "dev" "ci") | ternary "10Gi" "20Gi" }}\'
compactor:
persistence:
size: \'{{ has .Values.env_type (list "dev" "ci") | ternary "20Gi" "50Gi" }}\'
storegateway:
persistence:
size: \'{{ has .Values.env_type (list "dev" "ci") | ternary "10Gi" "15Gi" }}\'
ruler:
persistence:
size: \'{{ has .Values.env_type (list "dev" "ci") | ternary "2Gi" "4Gi" }}\'
objstoreConfig:
# only taken into account when monitoring.thanos.objstoreConfig.config is not empty
type: "S3"
# custom store config
config: {}
# https://github.com/thanos-io/objstore#supported-providers-clients
# lists all supported providers
alertmanager:
config: {}
webhooks:
jiralert:
env: {}
config: {}
snmp_notifier:
config:
# trapserver destinations
# address:port format
destinations: []
# SNMP version
version: V2c # V3
auth:
# V2c
community: "public"
# V3
auth_protocol: "SHA" # MD5 or SHA
priv_protocol: "AES" # DES or AES
username: "snmp-user"
password: "snmp-auth-pass"
priv_password: "snmp-priv-pass"
default_extra_args:
- --alert.severities=critical,error,warning,info
- --trap.user-object=4=/etc/snmp_notifier/platform_tag.tpl
- --trap.user-object=5=/etc/snmp_notifier/alertname.tpl
- --trap.user-object=6=/etc/snmp_notifier/cluster.tpl
- --trap.user-object=7=/etc/snmp_notifier/team.tpl
- --snmp.retries=1
- --snmp.timeout=5s
- --log.level=info
- --snmp.version={{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.version | default "V2c" }}
snmp_v3_extra_args:
# --no-snmp.authentication-enabled to disable authentication
- --snmp.authentication-enabled
- --snmp.authentication-protocol={{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.auth.auth_protocol }}
# --no-snmp.private-enabled to disable encryption
- --snmp.private-enabled
- --snmp.private-protocol={{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.auth.priv_protocol }}
final_extra_args: |
{{ if eq .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.version "V3" }}
{{ concat .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.default_extra_args .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.snmp_v3_extra_args | include "preserve-type" }}
{{ else }}
{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.default_extra_args | include "preserve-type" }}
{{ end }}
#monitoring:
# thanos:
# objstoreConfig:
# type: "GCS"
# # https://github.com/thanos-io/objstore#gcs
# config:
# bucket: "test"
# service_account: |-
# {
# "type": "service_account",
# "project_id": "project",
# "private_key_id": "abcdefghijklmnopqrstuvwxyz12345678906666",
# "private_key": "-----BEGIN PRIVATE KEY-----\\...\\n-----END PRIVATE KEY-----\\n",
# "client_email": "thanos@example.com",
# "client_id": "123456789012345678901",
# "auth_uri": "https://accounts.google.com/o/oauth2/auth",
# "token_uri": "https://oauth2.googleapis.com/token",
# "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
# "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/thanos%40gitpods.iam.gserviceaccount.com"
# }
# alertmanager:
# # Supported values from: https://prometheus.io/docs/alerting/latest/configuration/
# config:
# global:
# resolve_timeout: 15m
# route:
# group_by: [\'namespace\', \'alertname\', \'cluster\', \'severity\']
# receiver: \'jira_receiver\'
# routes:
# - matchers:
# - severity=~".*"
# receiver: jira_receiver
# continue: true
# - matchers:
# - severity=~".*"
# receiver: snmp_traps_receiver
# # disable grouping; send each alert individually
# group_by: ["..."]
# continue: true
# receivers:
# - name: \'jira_receiver\'
# webhook_configs:
# - url: \'http://alertmanager-jiralert:9097/alert\'
# send_resolved: false
# # Supported values from: https://github.com/prometheus-community/helm-charts/blob/main/charts/jiralert/values.yaml
# # Example config file: https://github.com/prometheus-community/jiralert/blob/master/examples/jiralert.yml
# - name: \'snmp_traps_receiver\'
# webhook_configs:
# - url: \'http://alertmanager-snmp-notifier:9464/alerts\'
# send_resolved: false
# webhooks:
# jiralert:
# # If missing, the proxies are those configured in `.Values.proxies`
# env:
# https_proxy: "https://example.com:3128"
# http_proxy: "https://example.com:3128"
# no_proxy: "127.0.0.1,jira.example.com"
# config:
# # Both `defaults` and `receivers` are required
# defaults:
# # API access fields.
# api_url: \'https://example.com\'
# user: \'user\'
# password: \'password\'
# # Alternatively to user and password use a Personal Access Token
# # personal_access_token: "Your Personal Access Token". See https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html
# receivers:
# # At least one receiver must be defined
# # Must match the Alertmanager receiver name. Required.
# - name: jira_receiver
# project: Sylva
# snmp_notifier:
# config:
# # trapserver destinations
# destinations:
# - 1.2.3.4:162
# - a.b.c.d:162
# version: V3
# auth:
# # V3
# auth_protocol: "SHA" # MD5 or SHA
# priv_protocol: "AES" # DES or AES
# username: "snmp-user"
# password: "snmp-auth-pass"
# priv_password: "snmp-priv-pass"
# # update the snmp_v3_extra_args to match the snmptrap server
# # should only need to be modified if disabling authentication and/or encryption
# snmp_v3_extra_args:
# # use authentication
# - --snmp.authentication-enabled
# - --snmp.authentication-protocol={{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.auth.auth_protocol }}
# # but no encryption
# - --no-snmp.private-enabled
logging:
# loki_url:
# - for mgmt cluster, the local Loki k8s service is used
# - for workload clusters, this is overridden via mgmt_cluster_state_values.logging.loki_url
# to point to the mgmt cluster Loki
loki_url: http://loki-gateway.loki.svc.cluster.local
# the following values are mapped into sylva-logging-flows Helm unit values
# (see https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-logging-flows)
flows: {}
clusterflows: {}
outputs: {}
clusteroutputs: {}
metallb: {}
#l2_lbs: # add your metallb-l2 config if required
#address_pools:
# - name: my-custom-pool
# addresses:
# - 10.122.22.151/32
# - 10.10.10.10-10.10.10.120
#l2_options:
# advertisements:
# - node_selectors:
# - kubernetes.io/hostname: hostB # to limit the set of nodes for a given advertisement, the node selector must be set
# interface: eth1 # interfaces selector can also be used together with node_selectors
# advertised_pools:
# - my-custom-pool # additional IP pools to be advertised to this peer
#bgp_lbs: # add your metallb-l3 config if required
#l3_options:
# bfd_profiles:
# bfd-profile-bgp:
# receiveInterval: 1000
# transmitInterval: 1000
# detectMultiplier: 3
# bgp_peers:
# bgp-peer1:
# local_asn: 64511 # example only, must be updated
# peer_asn: 64510 # example only, must be updated
# peer_address: 10.122.22.129
# password: bgp-peer-password
# bfd_profile: bfd-profile-bgp
# node_selectors:
# - matchLabels:
# kubernetes.io/hostname: hostB
# advertised_pools:
# - lbpool # default IP pool used for kube-api and ingress exposure
# - my-custom-pool # additional IP pools to be advertised to this peer
# receive_routes: # optional, only for learning routes from BGP peers on secondary interfaces
# mode: all
# # or
# prefixes:
# - prefix: 10.10.10.10/32
# - prefix: 192.168.2.0/24
# ge: 25
# le: 28
#address_pools:
# my-custom-pool:
# addresses:
# - 10.122.22.151/32
# - 10.10.10.10-10.10.10.120
calico_helm_values:
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/not-ready
operator: Exists
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/network-unavailable
operator: Exists
global:
clusterDNS: \'{{ .Values._internal.calico.clusterDNS }}\'
clusterDomain: \'{{ .Values._internal.calico.clusterDomain }}\'
serviceCIDR: \'{{ .Values._internal.calico.serviceCIDR }}\'
installation:
calicoNetwork:
bgp: Disabled
mtu: |
{{ .Values._internal.calico_mtu | include "preserve-type" }}
nodeAddressAutodetectionV4:
\'{{
tuple
.Values._internal.default_calico_autodetection_method
(not .Values._internal.capo_calico_autodetection_method_use_canReach_vip)
| include "set-only-if"
}}\'
ipPools:
- cidr: \'{{ first .Values.cluster.cluster_pods_cidrs }}\'
encapsulation: VXLAN
natOutgoing: Enabled
registry: UseDefault
felixConfiguration:
wireguardEnabled: \'{{ .Values.security.calico_wireguard_enabled | include "preserve-type" }}\'
metallb_helm_values:
prometheus: >-
{{
tuple (dict
"rbacPrometheus" true
"serviceAccount" "rancher-monitoring-prometheus"
"namespace" "cattle-monitoring-system"
"serviceMonitor" (dict "enabled" true)
)
(tuple . "monitoring" | include "unit-enabled")
| include "set-only-if"
}}
loadBalancerClass: \'{{ .Values._internal.loadBalancerClass }}\'
controller:
image:
repository: registry.gitlab.com/sylva-projects/sylva-elements/container-images/sandbox-registry/metallb-controller
tag: v0.15.2-sylva-custom
nodeSelector:
node-role.kubernetes.io/control-plane: \'{{ .Values._internal.cp_node_label_value }}\'
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
effect: NoSchedule
- effect: NoExecute
key: node-role.kubernetes.io/etcd
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
speaker:
image:
repository: registry.gitlab.com/sylva-projects/sylva-elements/container-images/sandbox-registry/metallb-speaker
tag: v0.15.2-sylva-custom
ignoreExcludeLB: \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpk" | include "as-bool" }}\'
priorityClassName: system-cluster-critical
frr:
enabled: false
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
effect: NoSchedule
- effect: NoExecute
key: node-role.kubernetes.io/etcd
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
frrk8s:
enabled: \'{{ tuple . .Values._internal.metallb_frrk8s_required | include "interpret-as-bool" }}\'
_internal:
state:
# is_upgrade reflects whether or not the currently processed release
# is an "upgrade", ie. a new release coming after a full installation
# of Sylva.
#
# Helm\'s built-in .Release.IsUpgrade is not used because it only reflects whether
# or not we are on the first version of the Helm release; because of how pivot
# is done, and because there may be failed attempts before the first one that succeeds,
# there are cases where .Release.IsUpgrade is true even though a full installation
# has never yet been reached.
#
# The criterion we rely on is whether this release of sylva-units reached reconciliation
# of the sylva-units-status Kustomization, which is the last reconciliation of the
# dependency chain.
is_upgrade: >-
{{ gt (lookup "kustomize.toolkit.fluxcd.io/v1" "Kustomization" .Release.Namespace "sylva-units-status" | dig "status" "inventory" "entries" list | len) 0
| include "preserve-type"
}}
is_multus_uninstall: >-
{{- tuple . "_internal.state.is_upgrade" | include "interpret" -}}
{{- $multus_is_deployed := lookup "v1" "ConfigMap" .Release.Namespace "sylva-units-status" | dig "data" "enabled-units" "{}" | fromYaml | dig "multus" false -}}
{{- $multus_is_disabled := not (tuple . "multus" | include "unit-enabled") -}}
{{ and .Values._internal.state.is_upgrade $multus_is_disabled $multus_is_deployed | include "preserve-type" }}
k8s_version: \'{{ include "interpret-as-string" (tuple . .Values.cluster.k8s_version) }}\'
mgmt_cluster: false
controlplane_kind: \'{{ get (dict
"cabpk" "KubeadmControlPlane"
"cabpr" "RKE2ControlPlane"
"cabpoa" "OpenshiftAssistedControlPlane"
"cabpck" "CK8sControlPlane"
) .Values.cluster.capi_providers.bootstrap_provider }}\'
# compute the hash value for clouds_yaml
clouds_yaml_hash: >-
{{
.Values.cluster | dig "capo" "clouds_yaml" "clouds" dict | toYaml
| sha256sum | trunc 8
}}
# \'cluster_machines_ready_unit_deps\' contains all the dependencies of the \'cluster\' unit, direct and indirect,
# except the ones set due to depending on cluster-machines-ready.
# (this is used by unit_templates.base-deps)
# cluster_machines_ready_unit_deps: # to avoid a circular reference loop between interpret variables, this is computed directly in units.yaml
ha_cluster:
# immutable ha_cluster.is_ha: keeps track of the first install mode, HA (true) or non-HA (false)
is_ha: >-
{{
lookup "v1" "Secret" .Release.Namespace "sylva-units-values"
| dig "data" "values" "" | b64dec | fromYaml | default dict
| dig "_internal" "ha_cluster" "is_ha" (ge (int .Values.cluster.control_plane_replicas) 3 | ternary true false)
| include "preserve-type"
}}
# variable only used to trigger an error when the requested control_plane_replicas is incompatible with the ha_cluster.is_ha value computed during the first install
checkpoint: >-
{{- $current_config := lookup "v1" "Secret" .Release.Namespace "sylva-units-values" | dig "data" "values" "" | b64dec | fromYaml | default dict -}}
{{- $is_ha := dig "_internal" "ha_cluster" "is_ha" "initial run" $current_config | toString -}}
{{- if not (eq $is_ha "initial run") -}}
{{- if (eq $is_ha "false") -}}
{{- (ge (int .Values.cluster.control_plane_replicas) 3) | ternary "" "requested control_plane_replicas={{.Values.cluster.control_plane_replicas}} : compatible with initial non HA mode" | required (print "requested control_plane_replicas=" .Values.cluster.control_plane_replicas ": can\'t continue, first install happened in non-HA mode, this value should be 1") -}}
{{- else -}}
{{- (lt (int .Values.cluster.control_plane_replicas) 3) | ternary "" "requested control_plane_replicas={{.Values.cluster.control_plane_replicas}} : compatible with initial HA mode" | required (print "requested control_plane_replicas=" .Values.cluster.control_plane_replicas ": can\'t continue, current mode is HA, requires a value >=3") -}}
{{- end -}}
{{- else -}}
install run, requested control_plane_replicas={{.Values.cluster.control_plane_replicas}}
{{- end -}}
# variable used for the validation of .Values.os_images fields for baremetal deployments
os_images_validation: >-
{{- if ( gt (.Values.os_images | len) 0 ) }}
{{- $capm3_required := tuple . "os-image-server" | include "unit-enabled" }}
{{- $capo_required := .Values.cluster.capi_providers.infra_provider | eq "capo" }}
{{- range $os_image_name, $os_image_props := .Values.os_images }}
{{- if $capm3_required }}
{{- $os_image_props.sha256 | required ( print "\'sha256\' must be specified for os_image \'" $os_image_name "\'" ) }}
{{- dig "image-format" "" $os_image_props | required ( print "\'image-format\' must be specified for os_image \'" $os_image_name "\'. It can be raw or qcow2" ) }}
{{- end }}
{{ if $capo_required }}
{{- $os_image_props.md5 | required ( print "\'md5\' must be specified for os_image \'" $os_image_name "\'" ) }}
{{- end }}
{{- end }}
{{- end }}
# default replica number computed from the ha_cluster.is_ha value
default_replicas: \'{{ tuple . .Values._internal.ha_cluster.is_ha 3 1 | include "interpret-ternary" | int | include "preserve-type" }}\'
# value of "node-role.kubernetes.io/control-plane" node label that differs between kubeadm and rke2
cp_node_label_value: \'{{ ((tuple . .Values.cluster.capi_providers.bootstrap_provider | include "interpret-as-string") | eq "cabpr") | ternary "true" "" }}\'
monitoring:
enabled: \'{{ tuple . "monitoring" | include "unit-enabled" | include "as-bool" }}\'
conditionals:
longhorn:
enabled: \'{{ tuple . "longhorn" | include "unit-enabled" }}\'
flux:
enabled: \'{{ tuple . "flux-system" | include "unit-enabled" }}\'
minio:
enabled: \'{{ tuple . "minio-monitoring" | include "unit-enabled" }}\'
loki:
enabled: \'{{ tuple . "loki" | include "unit-enabled" }}\'
kepler:
enabled: \'{{ tuple . "kepler" | include "unit-enabled" }}\'
snmp:
enabled: \'{{ tuple . "snmp-exporter" | include "unit-enabled" }}\'
crossplane:
enabled: \'{{ tuple . "crossplane" | include "unit-enabled" }}\'
harbor:
enabled: \'{{ tuple . "harbor" | include "unit-enabled" }}\'
thanos:
store_config_internal:
bucket: "thanos"
endpoint: "minio.minio-monitoring.svc.cluster.local"
insecure: false
http_config:
tls_config:
insecure_skip_verify: true
insecure_skip_verify: true
# Use the reduced redundancy storage class (EC:1) as we are using two replicas in longhorn
put_user_metadata:
X-Amz-Storage-Class: REDUCED_REDUNDANCY
objstoreConfig:
type: \'{{ ternary "S3" .Values.monitoring.thanos.objstoreConfig.type (.Values.monitoring.thanos.objstoreConfig.config | empty) }}\'
# construct objstoreConfig for internal MinIO storage
# or use user-defined options
config: |-
{{ if (.Values.monitoring.thanos.objstoreConfig.config | empty) }}
{{ .Values._internal.thanos.store_config_internal | include "preserve-type" }}
{{ else }}
{{ .Values.monitoring.thanos.objstoreConfig.config | include "preserve-type" }}
{{ end }}
# thanos user and password
thanos_user: \'{{ .Values.cluster.name }}\'
thanos_password: \'{{ include "internalPersistentRandomPasswd" (tuple . "thanos_password") }}\'
# loki user and password
loki_user: \'{{ .Values.cluster.name }}\'
loki_password: \'{{ include "internalPersistentRandomPasswd" (tuple . "loki_password") }}\'
# Grafana password
grafana_admin_password: \'{{ include "internalPersistentRandomPasswd" (tuple . "grafana_admin_password") }}\'
# kubevirt admin user and password
kubevirt_admin_user: \'admin\'
kubevirt_admin_password: \'{{ include "internalPersistentRandomPasswd" (tuple . "kubevirt_admin_password") }}\'
default_password: \'{{ include "internalPersistentRandomPasswd" (tuple . "default_password") }}\'
default_longhorn_storage_class: longhorn
default_storage_class: >-
{{- if .Values.default_storage_class -}}
{{ .Values.default_storage_class }}
{{- else -}}
{{/* ensure .Values._internal.storage_class_unit_map is interpreted */}}
{{- tuple . "_internal.storage_class_unit_map" | include "interpret" -}}
{{- $envAll := . -}}
{{- $enabledSCs := list -}}
{{- $enabledUnits := list -}}
{{- range $sc, $unit := .Values._internal.storage_class_unit_map -}}
{{- if and (ne $sc "local-path") (tuple $envAll $unit | include "unit-enabled") -}}
{{- $enabledSCs = append $enabledSCs $sc -}}
{{- $enabledUnits = (not (has $unit $enabledUnits) | ternary (append $enabledUnits $unit) $enabledUnits) -}}
{{- end -}}
{{- end -}}
{{- if eq (len $enabledSCs) 1 -}}
{{- index $enabledSCs 0 -}}
{{- else if and (eq (len $enabledUnits) 1) (eq (index $enabledUnits 0) "longhorn") -}}
{{ .Values._internal.default_longhorn_storage_class }}
{{- else if eq (len $enabledSCs) 0 -}}
local-path
{{- else -}}
{{- fail (printf "Multiple storage classes are available based on the enabled units (%s), you need to specify the default one via \'default_storage_class\'" ($enabledSCs|join ", ")) -}}
{{- end -}}
{{- end -}}
# maps storage class names to the name of the unit that implements it
# (a storage class does not need to appear in this map if the required unit has the same name)
storage_class_unit_map:
local-path: local-path-provisioner
single-replica-storageclass: longhorn
two-replicas-storageclass: longhorn
longhorn: longhorn
vsphere-csi: vsphere-csi-driver
\'{{ .Values.openstack.storageClass.name }}\': cinder-csi
cephfs-csi: ceph-csi-cephfs
nfs-ganesha: nfs-ganesha
default_storage_class_unit: \'{{ tuple . .Values._internal.default_storage_class_unit_ref | include "interpret-as-string" }}\'
default_storage_class_unit_ref: >-
{{- tuple . "_internal.default_storage_class" | include "interpret" -}}
{{- tuple . "_internal.storage_class_unit_map" | include "interpret" -}}
{{- .Values._internal.storage_class_unit_map | dig .Values._internal.default_storage_class (printf "storage class unit not found for %s" .Values._internal.default_storage_class) -}}
storage_class_RWX_support:
- longhorn
- two-replicas-storageclass
- single-replica-storageclass
- cephfs-csi
default_storage_class_RWX_support: >-
{{- $default_storage_class := tuple . .Values._internal.default_storage_class | include "interpret-as-string" -}}
{{- has $default_storage_class .Values._internal.storage_class_RWX_support -}}
default_storage_class_access_mode_rwx: \'{{ eq (tuple . .Values._internal.default_storage_class_RWX_support | include "interpret-as-string") "true" | ternary "ReadWriteMany" "ReadWriteOnce" }}\'
# we could use rwx as default when available, if we find a way to automate a migration from rwo to rwx
# (see https://gitlab.com/sylva-projects/sylva-core/-/issues/1455)
# we could also implement a way to select RWX on fresh deployments
# harbor_storage_access_mode: \'{{ tuple . .Values._internal.default_storage_class_access_mode_rwx | include "interpret-as-string" }}\'
harbor_storage_access_mode: ReadWriteOnce
vault_replicas: >-
{{ or (.Values.cluster.capi_providers.infra_provider | eq "capd") (and (lt (int .Values.cluster.control_plane_replicas) 3) (eq .Values._internal.default_storage_class_unit "local-path")) | ternary 1 3 }}
vault_affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: vault
vault_cr: vault
topologyKey: kubernetes.io/hostname
vault_no_affinity: {}
keycloak_affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- sso
topologyKey: kubernetes.io/hostname
# this is used in use-oci-artifacts.values.yaml
# and in a few other places
sylva_core_version: \'{{ .Chart.Version }}\'
# We need to retrieve bootstrap_node_ip in the management cluster when using libvirt-metal emulation; in that case it is stored in a ConfigMap by the libvirt-metal unit
bootstrap_node_ip: \'{{ lookup "v1" "ConfigMap" "sylva-system" "cluster-public-endpoint" | dig "data" "address" "not_found" | default "not_found" }}\'
display_external_ip_msg: \'{{ and (.Values.openstack.floating_ip | eq "") (.Values.cluster_virtual_ip | eq "") | ternary "It must resolve to the IP used to reach the cluster" (printf "It must resolve to %s" .Values.display_external_ip ) }}\'
k8s_version_map:
"1.30": \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" | ternary "v1.30.14+rke2r2" "v1.30.14" }}\'
1.31: >-
{{- $version := "v1.31.13" -}}
{{- if .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" -}}
{{- $version = "v1.31.13+rke2r1" -}}
{{- end -}}
{{- if .Values.cluster.capi_providers.bootstrap_provider | eq "cabpck" -}}
{{- $version = "v1.31.6" -}}
{{- end -}}
{{- $version -}}
1.32: >-
{{- $version := "v1.32.9" -}}
{{- if .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" -}}
{{- $version = "v1.32.9+rke2r1" -}}
{{- end -}}
{{- if .Values.cluster.capi_providers.bootstrap_provider | eq "cabpck" -}}
{{- $version = "v1.32.5" -}}
{{- end -}}
{{- $version -}}
# compute and validate the number of nodes
check_node_count_vs_bmh: >-
{{- if eq .Values.cluster.capi_providers.infra_provider "capm3" -}}
{{- $node_groups := dict "control-plane" (dict "requested_nodes" .Values.cluster.control_plane_replicas "available_bmh" 0) -}}
{{- range $md_name, $md_def := .Values.cluster.machine_deployments -}}
{{- $_ := set $node_groups $md_name (dict "requested_nodes" $md_def.replicas "available_bmh" 0) -}}
{{- end -}}
{{- $available_bmh := 0 -}}
{{- range $bmh_name, $bmh_def := .Values.cluster.baremetal_hosts -}}
{{- $bmh_def := mergeOverwrite (deepCopy $.Values.cluster.baremetal_host_default) $bmh_def -}}
{{- $machine_group := tuple (dict "Values" (deepCopy $.Values.cluster)) $bmh_name $bmh_def | include "getMatchingValuesDefinitionForBmh" | fromJson -}}
{{- if $machine_group.machineGroupName -}}
{{- $machine_group_name := $machine_group.machineGroupName -}}
{{- $available_bmh = $node_groups | dig $machine_group_name "available_bmh" 0 | add 1 -}}
{{- $node_groups = mergeOverwrite $node_groups (dict $machine_group_name (dict "available_bmh" $available_bmh)) -}}
{{- end -}}
{{- end -}}
{{- range $node_group_name, $node_group_def := $node_groups -}}
{{- if lt (int $node_group_def.available_bmh) (int $node_group_def.requested_nodes) -}}
{{- printf "Not enough available baremetal hosts for %s: %d requested, %d available" $node_group_name $node_group_def.requested_nodes $node_group_def.available_bmh | fail -}}
{{- end -}}
{{- end -}}
{{ $node_groups | include "preserve-type" }}
{{- end -}}
# total number of nodes in this cluster
node_count:
\'
{{- $n := .Values.cluster.control_plane_replicas -}}
{{- tuple . "cluster.machine_deployments" | include "interpret" -}}
{{- range $_, $md := .Values.cluster.machine_deployments -}}
{{- $n = add $n $md.replicas -}}
{{- end -}}
{{ $n | include "preserve-type" }}
\'
# total worker nodes in the cluster
worker_node_count: >-
{{- $n := 0 -}}
{{- tuple . "cluster.machine_deployments" | include "interpret" -}}
{{- range $_, $md := .Values.cluster.machine_deployments -}}
{{- $n = add $n $md.replicas -}}
{{- end -}}
{{ $n | include "preserve-type" }}
# upper bound to the number of longhorn nodes
longhorn_node_count_upper_bound: >-
{{- $count := 0 -}}
{{- if hasKey .Values.cluster "baremetal_host_default" -}}
{{- if hasKey .Values.cluster.baremetal_host_default "longhorn_disk_config" -}}
{{- $count = len .Values.cluster.baremetal_hosts -}}
{{- else -}}
{{- range $key, $value := .Values.cluster.baremetal_hosts -}}
{{- if hasKey $value "longhorn_disk_config" -}}
{{- $count = add $count 1 -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- else -}}
{{- range $key, $value := .Values.cluster.baremetal_hosts -}}
{{- if hasKey $value "longhorn_disk_config" -}}
{{- $count = add $count 1 -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- tuple . "_internal.node_count" | include "interpret" -}}
{{- $count = min $count .Values._internal.node_count -}}
{{- $count -}}
# this check will fail if a Longhorn storage class is selected as default
# and the cluster node/BMH settings make it possible to determine that
# there aren\'t enough Longhorn nodes to support the number of replicas
# of the storage class
#
# this check can be disabled with, for instance:
# _internal.check_longhorn_storage_class_vs_replicas: forcefully-bypassed
check_longhorn_storage_class_vs_replicas: >-
{{- $min_nodes_for_class := (dict "longhorn" 3
"two-replicas-storageclass" 2) -}}
{{- range $sc,$min_nodes := $min_nodes_for_class -}}
{{- if and ($.Values.default_storage_class | eq $sc)
(lt ($.Values._internal.longhorn_node_count_upper_bound|int) $min_nodes) -}}
{{- fail (printf "\'%s\' storage class is selected as default_storage_class, but less than %d Longhorn nodes are available (no more than %s BMH/Node(s) identified)" $sc $min_nodes $.Values._internal.longhorn_node_count_upper_bound) -}}
{{- end -}}
{{- end -}}
os_image_server_images_configmap: capm3-os-image-server-os-images
default_calico_autodetection_method: >-
{{- if .Values.cluster.capi_providers.bootstrap_provider | eq "cabpk" }}
{{- dict "kubernetes" "NodeInternalIP" | include "preserve-type" -}}
{{- else -}}
{{- dict "canReach" (not (empty .Values.cluster.mgmt_cluster_ip) | ternary .Values.cluster.mgmt_cluster_ip .Values.cluster_virtual_ip) | include "preserve-type" -}}
{{- end -}}
capo_calico_autodetection_method_use_canReach_vip:
\'{{
and (.Values.cluster.capi_providers.bootstrap_provider | eq "cabpr")
(.Values.cluster.capi_providers.infra_provider | eq "capo")
(empty .Values.cluster.mgmt_cluster_ip) | include "preserve-type"
}}\'
calico_readiness_unit: "calico"
metallb:
# for CAPO the cluster_virtual_ip is injected into \'metallb-resources\' and \'cluster\' units via capo-cluster-resources configmap
# so we omit it here in that case
cluster_virtual_ip: \'{{ tuple .Values.cluster_virtual_ip (ne .Values.cluster.capi_providers.infra_provider "capo") | include "set-only-if" }}\'
l2_lbs:
l2_options:
advertisements:
l2advertisement:
advertised_pools:
- lbpool # this is the pool generated from cluster_virtual_ip
interfaces: >-
{{- if eq .Values.cluster.capi_providers.infra_provider "capm3" }}
{{- $control_plane_networks := mergeOverwrite (deepCopy .Values.cluster.capm3.networks) (deepCopy (.Values.cluster.control_plane.capm3.networks | default dict)) }}
{{ .Values.cluster.cluster_primary_interfaces | default (list ( $control_plane_networks.primary.interface)) | include "preserve-type" }}
{{- else -}}
{{- list | include "preserve-type" -}}
{{- end }}
node_selectors:
- matchLabels:
node-role.kubernetes.io/control-plane: \'{{ .Values._internal.cp_node_label_value }}\'
metallb_service_annotations: \'{{ dict "metallb.io/loadBalancerIPs" .Values.cluster_virtual_ip "metallb.io/allow-shared-ip" "cluster-external-ip" | include "preserve-type" }}\'
metallb_values_check: >-
{{- tuple . "cluster.metallb_helm_values" | include "interpret" -}}
{{- tuple . "units.metallb.helmrelease_spec.values" | include "interpret" -}}
{{- if ne (omit .Values.cluster.metallb_helm_values "prometheus" | toYaml) (omit .Values.units.metallb.helmrelease_spec.values "prometheus" | toYaml) }}
{{ printf "The Helm values for Metallb are different between the cluster and the metallb unit. It is recomended to use `.Values.metallb_helm_values` to ensure consistency." | fail }}
{{- end }}
metallb_frrk8s_required: >-
{{- $metallb_frrk8s_required := false }}
{{- if (.Values.metallb | dig "bgp_lbs" "l3_options" "bgp_peers" "") }}
{{- range $_, $bgppeer := .Values.metallb.bgp_lbs.l3_options.bgp_peers }}
{{- if or $bgppeer.bfd_profile $bgppeer.receive_routes }}
{{- $metallb_frrk8s_required = true }}
{{- end }}
{{- end }}
{{- end }}
{{ tuple . $metallb_frrk8s_required | include "interpret-as-bool" }}
loadBalancerClass: \'{{ tuple . (eq .Values.cluster.capi_providers.bootstrap_provider "cabpr") "sylva.org/metallb-class" "kube-vip.io/kube-vip-class" | include "interpret-ternary" }}\'
kube_vip_service_annotations: \'{{ dict "kube-vip.io/loadbalancerIPs" .Values.cluster_virtual_ip | include "preserve-type" }}\'
lb_service_annotations: \'{{ tuple . (tuple . "metallb" | include "unit-enabled") .Values._internal.metallb_service_annotations .Values._internal.kube_vip_service_annotations | include "interpret-ternary" | include "preserve-type" }}\'
previous_values: \'{{ omit (lookup "v1" "Secret" .Release.Namespace "sylva-units-values" | dig "data" "values" "" | b64dec | fromYaml | default dict) "unit_templates" | include "preserve-type" }}\'
mgmt_cluster_default_os_selector: >-
{{- $selector := .Values.cluster | dig .Values.cluster.capi_providers.infra_provider "os_image_selector" dict | deepCopy -}}
{{- tuple .Values.cluster $selector | include "finalize-os-image-selector" | fromJson | include "preserve-type" -}}
immutable_values:
_internal:
default_storage_class:
_immutable: true
_immutable_comment: "default storage cannot be updated (this error may result from enabling a new CSI unit resulting in having the automatic computation of the default storage class give a different result; if you are in this case you can simply set \'default_storage_class: <current default storage class>\' in your environment values)"
cluster_domain:
_immutable: true
_immutable_comment: "The update of cluster domain is not supported"
cluster:
name:
_immutable: true
_immutable_comment: "Renaming a cluster is not supported"
cluster_virtual_ip:
# The cluster virtual IP cannot be changed
#
# In the case where this IP isn\'t set by the user but is determined dynamically (the case on CAPO today, where capo-cluster-resources allocates it)
# then cluster_virtual_ip will not always contain the virtual IP, and may instead be set to its default value.
#
# Since this default value has changed (today it\'s "", in the past it was "55.55.55.55"), the immutability check
# is disabled in the case where the old value is 55.55.55.55 and the new one is "".
_immutable: >-
{{- tuple . "_internal.previous_values" | include "interpret" -}}
{{ not (and (.Values._internal.previous_values.cluster_virtual_ip | eq "55.55.55.55")
(.Values.cluster_virtual_ip | eq ""))
| include "preserve-type"
}}
_immutable_comment: "cluster_virtual_ip can\'t be changed once deployed"
capi_providers:
bootstrap_provider:
_immutable: true
_immutable_comment: "K8s distribution of a running cluster can\'t be changed"
infra_provider:
_immutable: true
_immutable_comment: "Underlying infrastructure of a running cluster can\'t be changed"
_immutable: \'{{ not .Values._internal.ha_cluster.is_ha | include "preserve-type" }}\'
_immutable_comment: "Cluster values can\'t be changed, as they would trigger a rolling upgrade that is not supported in non-HA mode"
longhorn:
helmrelease_spec:
values:
persistence:
defaultClassReplicaCount:
_immutable: true
_immutable_comment: "Default longhorn class replica count should not be changed as it impact longhorn upgrade"
pdb_allow_unhealthy_pod_eviction:
target:
kind: PodDisruptionBudget
patch: |-
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: not-used
spec:
unhealthyPodEvictionPolicy: AlwaysAllow
calico:
# for clusterDNS, take the .10 of the first subnet of cluster.cluster_services_cidrs
# FIXME: this will not work for subnets not starting at 0 (e.g. 192.168.1.128/25)
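# (for instance "10.96.0.0/12" yields "10.96.0.10", while "192.168.1.128/25" would be left unchanged)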
clusterDNS: \'{{ regexReplaceAll "[.]0/.*" (first .Values.cluster.cluster_services_cidrs) ".10" }}\'
clusterDomain: cluster.local
serviceCIDR: \'{{ first .Values.cluster.cluster_services_cidrs }}\'
base_cni_mtu_default: 1450
base_cni_mtu_per_infra:
capm3: |-
{{- tuple . "cluster.capm3.default_network_settings" | include "interpret" -}}
{{- $base_cni_mtu := .Values.cluster.capm3.default_network_settings.mtu -}}
{{- $control_plane_primary_interface := .Values.cluster | dig "control_plane" "capm3" "networks" "primary" "interface" .Values.cluster.capm3.networks.primary.interface -}}
{{- if not (eq $control_plane_primary_interface "") -}}
{{- $base_cni_mtu = tuple (dict "Values" (dict "capm3" .Values.cluster.capm3))
$control_plane_primary_interface
(.Values.cluster.control_plane.network_interfaces | dig $control_plane_primary_interface dict)
.Values.cluster.capm3.networks
.Values.cluster.control_plane.network_interfaces | include "calculate-mtu" | int -}}
{{- end -}}
{{- $base_cni_mtu | include "preserve-type" -}}
base_cni_mtu: >-
{{
.Values._internal.base_cni_mtu_per_infra
| dig .Values.cluster.capi_providers.infra_provider .Values._internal.base_cni_mtu_default
| include "preserve-type"
}}
calico_encapsulation_overhead: |-
{{- tuple . "_internal.base_cni_mtu" | include "interpret" -}}
{{- $encapsulation_header := dict
"ipip" 20
"ipipcrosssubnet" 20
"vxlan" 50
"vxlancrosssubnet" 50
"wireguard" 60
"none" 0
-}}
{{- $max_overhead := 0 -}}
{{- /* Iterate through all ipPools to find maximum encapsulation overhead */ -}}
{{- range .Values.calico_helm_values.installation.calicoNetwork.ipPools -}}
{{- if .encapsulation -}}
{{- $current_encapsulation := .encapsulation | lower -}}
{{- if hasKey $encapsulation_header $current_encapsulation -}}
{{- $max_overhead = max $max_overhead (index $encapsulation_header $current_encapsulation) -}}
{{- else -}}
{{- fail (printf "Unknown encapsulation type \'%s\' in calico_helm_values.installation.calicoNetwork.ipPools. Supported types are: %s" $current_encapsulation ((omit $encapsulation_header "wireguard") | keys | join ", ")) -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /* Check for wireguard if enabled */ -}}
{{- if .Values.security.calico_wireguard_enabled | default false -}}
{{- $max_overhead = max $max_overhead (index $encapsulation_header "wireguard") -}}
{{- end -}}
{{- $max_overhead | include "preserve-type" -}}
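# worked example with the defaults above (non-capm3 infrastructures): base_cni_mtu_default is 1450
# and the default ipPool uses VXLAN encapsulation (overhead 50), so calico_mtu evaluates to 1400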
calico_mtu: |-
{{- /* Calculate final MTU */ -}}
{{- $mtu := sub .Values._internal.base_cni_mtu .Values._internal.calico_encapsulation_overhead -}}
{{- $mtu | include "preserve-type" -}}
# Substitute with the cluster_virtual_ip value if it is set to a value other than the default (mandatory with capm3)
# Otherwise, the value is substituted from the capo-cluster-resources when using capo
cluster_virtual_ip: \'{{ tuple .Values.cluster_virtual_ip (ne .Values.cluster.capi_providers.infra_provider "capo") | include "set-only-if" }}\'
coredns:
# for clusterDNS, take the .10 of the first subnet of cluster.cluster_services_cidrs
# FIXME: this will not work for subnets not starting at 0 (e.g. 192.168.1.128/25)
clusterDNS: \'{{ regexReplaceAll "[.]0/.*" (first .Values.cluster.cluster_services_cidrs) ".10" }}\'
clusterDomain: cluster.local
serviceCIDR: \'{{ first .Values.cluster.cluster_services_cidrs }}\'
# The labels of coredns differ between rke2 and kubeadm
rke2_coredns_selector: app.kubernetes.io/instance == \'rke2-coredns\'
kubeadm_coredns_selector: k8s-app == \'kube-dns\'
coredns_selector: \'{{ (.Values.cluster.capi_providers.bootstrap_provider | eq "cabpr") | ternary .Values._internal.rke2_coredns_selector .Values._internal.kubeadm_coredns_selector }}\'
# renovate: datasource=docker
oci_tools_image: registry.gitlab.com/sylva-projects/sylva-elements/container-images/oci-tools:0.3.1
# renovate: datasource=docker
keytool_image: registry.gitlab.com/sylva-projects/sylva-elements/container-images/keytool-image:0.2.1
node_collector_volumes_default:
- name: var-lib-etcd
hostPath:
path: /var/lib/etcd
- name: var-lib-kubelet
hostPath:
path: /var/lib/kubelet
- name: var-lib-kube-scheduler
hostPath:
path: /var/lib/kube-scheduler
- name: var-lib-kube-controller-manager
hostPath:
path: /var/lib/kube-controller-manager
- name: etc-systemd
hostPath:
path: /etc/systemd
- name: lib-systemd
hostPath:
path: /lib/systemd
- name: etc-kubernetes
hostPath:
path: /etc/kubernetes
- name: etc-cni-netd
hostPath:
path: /etc/cni/net.d/
node_collector_volumes: \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" | ternary (append .Values._internal.node_collector_volumes_default (dict "name" "var-lib-rancher-rke2" "hostPath" (dict "path" "/var/lib/rancher/rke2"))) .Values._internal.node_collector_volumes_default | include "preserve-type" }}\'
node_collector_volume_mounts_default:
- name: var-lib-etcd
mountPath: /var/lib/etcd
readOnly: true
- name: var-lib-kubelet
mountPath: /var/lib/kubelet
readOnly: true
- name: var-lib-kube-scheduler
mountPath: /var/lib/kube-scheduler
readOnly: true
- name: var-lib-kube-controller-manager
mountPath: /var/lib/kube-controller-manager
readOnly: true
- name: etc-systemd
mountPath: /etc/systemd
readOnly: true
- name: lib-systemd
mountPath: /lib/systemd/
readOnly: true
- name: etc-kubernetes
mountPath: /etc/kubernetes
readOnly: true
- name: etc-cni-netd
mountPath: /etc/cni/net.d/
readOnly: true
node_collector_volume_mounts: \'{{ .Values.cluster.capi_providers.bootstrap_provider | eq "cabpr" | ternary (append .Values._internal.node_collector_volume_mounts_default (dict "name" "var-lib-rancher-rke2" "mountPath" "/var/lib/rancher/rke2" "readOnly" true)) .Values._internal.node_collector_volume_mounts_default | include "preserve-type" }}\'
mgmt_cluster_state_values:
cluster:
mgmt_cluster_ip: \'{{ .Values.display_external_ip }}\'
capm3:
image_provisioning_host: \'{{ .Values.display_external_ip }}\'
image_provisioning_scheme: \'{{ .Values.cluster.capm3.image_provisioning_scheme }}\'
proxies:
http_proxy: \'{{ .Values.proxies.http_proxy }}\'
https_proxy: \'{{ .Values.proxies.https_proxy }}\'
no_proxy: \'{{ include "sylva-units.no_proxy" (tuple .) }}\'
units:
cluster-import-init:
enabled: \'{{ tuple . "rancher" | include "unit-enabled" }}\'
cluster-import:
enabled: \'{{ tuple . "rancher" | include "unit-enabled" }}\'
cluster-import-check:
enabled: \'{{ tuple . "rancher" | include "unit-enabled" }}\'
monitoring:
thanos:
receive_url: "https://{{ .Values.external_hostnames.thanos_receive }}/api/v1/receive"
logging:
loki_url: "https://{{ .Values.external_hostnames.loki }}"
_internal:
workload_clusters_cluster_subdomain: wclusters.{{ .Values.cluster_domain }} # workload clusters cluster_domain will all be under wclusters.<cluster_domain>
sylva_mgmt_enabled_units: >-
{{- $units := dict -}}
{{- range $unit_name, $unit_def := .Values.units -}}
{{- if include "unit-enabled" (tuple $ $unit_name) -}}
{{- $_ := set $units $unit_name true -}}
{{- end -}}
{{- end -}}
{{- $units | include "preserve-type" -}}
mgmt_cluster_service_names: >-
{{- $mgmt_services := list -}}
{{- range $name, $hostname := .Values.external_hostnames }}
{{- if has $name (list
"loki"
"rancher"
"thanos_receive"
"harbor"
"keycloak"
) -}}
{{- $mgmt_services = append $mgmt_services $hostname -}}
{{- end -}}
{{- end -}}
{{- $mgmt_services | include "preserve-type" -}}
workload_clusters:
teams: {}
snmp:
devices: []
auth: {}
rancher:
auth_user_info_max_age_seconds: "0"
auth_user_info_resync_cron: "*/4 * * * *"
# Sample snmp configuration, as it needs to be added to the secrets.yaml file
# snmp:
# devices:
# - alias: Server1
# target: 1.2.3.4
# module: dell_idrac
# auth: snmpv3
# cluster_name: \'Client1\'
# - alias: Server2
# target: 2.3.4.5
# module: hp_cpq
# auth: snmpv2
# cluster_name: \'Client2\'
# auth:
# snmpv3:
# version: 3
# community: public
# security_level: authPriv
# username: snmp
# password: xxxxx
# auth_protocol: SHA256
# priv_protocol: AES
# priv_password: xxxxx
# snmpv2:
# version: 2
# community: public
openshift:
assisted:
dbSize: 8Gi
fsSize: 8Gi
imagestoreSize: 10Gi
osImages:
- openshiftVersion: "4.17.0"
cpuArchitecture: "x86_64"
url: "https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.17/4.17.0/rhcos-4.17.0-x86_64-live.x86_64.iso"
version: "4.17.0"
- openshiftVersion: "4.19"
cpuArchitecture: "x86_64"
url: "https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.19/4.19.0/rhcos-live-iso.x86_64.iso"
version: "4.19.0"
# variable to specify the openshift assisted service hostname
# serviceHostname: openshift-assisted-service.example.com
# variable to specify the openshift assisted image service hostname
# imageHostname: openshift-assisted-images.example.com
# backup:
# timestamped: false # if true, a timestamp YYYYMMDDHHMM is added to the name of the backup files before uploading them to the target bucket. If false, a new backup will overwrite a previously uploaded one.
# store:
# s3:
# host: ""
# access_key: ""
# secret_key: ""
# bucket: ""
# cert: ""
';
preg_match_all($re, $str, $matches, PREG_SET_ORDER, 0);
// Print the entire match result
var_dump($matches);
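// Illustrative only: one way to consume the PREG_SET_ORDER result instead of dumping it,
// using the named capture groups defined in the pattern above (adjust if the pattern changes).
foreach ($matches as $match) {
    printf(
        "registry=%s repository=%s tag=%s\n",
        $match['registryUrl'] ?? '',
        $match['depName'] ?? '',
        $match['currentValue'] ?? ''
    );
}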
// Please keep in mind that these code samples are automatically generated and are not guaranteed to work.
// If you find any syntax errors, feel free to submit a bug report.
// For a full regex reference for PHP, please visit: http://php.net/manual/en/ref.pcre.php