import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class Example {
public static void main(String[] args) {
final String regex = "\\s*registry:\\s*(?<registryUrl>.*)\\n\\s+repository:\\s+(?<depName>.*)\\n\\s+tag:\\s\\\"?(?<currentValue>[^\\\"\\n]+)\\\"?\\s*\\n";
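// A minimal usage sketch (assumption: the standard java.util.regex flow; the
// actual matching code is expected after the 'string' definition below),
// showing how the three named groups would be extracted:
//
//     final Matcher matcher = Pattern.compile(regex).matcher(string);
//     while (matcher.find()) {
//         System.out.println("registryUrl:  " + matcher.group("registryUrl"));
//         System.out.println("depName:      " + matcher.group("depName"));
//         System.out.println("currentValue: " + matcher.group("currentValue"));
//     }
//
// Each match covers a YAML fragment of the form:
//     registry: <registryUrl>
//       repository: <depName>
//       tag: "<currentValue>"
// where the quotes around the tag value are optional (per the \"? in the
// pattern) and the 'repository'/'tag' lines must be indented (\s+).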
final String string = "# Default values for sylva-units.\n"
+ "# This is a YAML-formatted file.\n"
+ "# Declare variables to be passed into your templates.\n\n"
+ "# generic helm chart release name overrides\n"
+ "nameOverride: \"\"\n"
+ "fullnameOverride: \"\"\n\n"
+ "git_repo_spec_default:\n"
+ " interval: 168h\n"
+ " timeout: 5m\n\n"
+ "oci_repo_spec_default:\n"
+ " interval: 168h\n"
+ " timeout: 5m\n\n"
+ "source_templates: # template to generate Flux GitRepository/OCIRepository resources\n"
+ " # <repo-name>:\n"
+ " # kind: GitRepository/OCIRepository\n"
+ " # spec: # partial spec for a Flux resource\n"
+ " # url: https://gitlab.com/sylva-projects/sylva-core.git\n"
+ " # #secretRef: # is autogenerated based on 'auth'\n"
+ " # ref: # can be overridden per-unit, with 'ref_override'\n"
+ " # branch: main\n"
+ " # auth: # optional 'username'/'password' dict containing git authentication information\n\n"
+ " sylva-core:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-core.git\n"
+ " ref:\n"
+ " # the sylva-core framework will always override this ref so that the\n"
+ " # currently checked out commit of sylva-core is used by sylva-units\n"
+ " # (you can grep the code for \"CURRENT_COMMIT\" to find out how)\n"
+ " commit: not-a-real-commit\n\n"
+ " weave-gitops:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://github.com/weaveworks/weave-gitops.git\n"
+ " ref:\n"
+ " tag: v0.38.0\n\n"
+ " sylva-capi-cluster:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-capi-cluster.git\n"
+ " ref:\n"
+ " tag: 0.12.4\n\n"
+ " sync-openstack-images:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sync-openstack-images.git\n"
+ " ref:\n"
+ " tag: 0.7.0\n\n"
+ " devnull:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/devnull.git\n"
+ " ref:\n"
+ " tag: 0.1.0\n\n"
+ " local-path-provisioner:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://github.com/rancher/local-path-provisioner.git\n"
+ " ref:\n"
+ " tag: v0.0.32\n\n"
+ " sriov-resources:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sriov-resources.git\n"
+ " ref:\n"
+ " tag: 0.0.5\n\n"
+ " metallb-resources:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/metallb-resources.git\n"
+ " ref:\n"
+ " tag: 0.2.1\n\n"
+ " os-image-server:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/os-image-server.git\n"
+ " ref:\n"
+ " tag: 2.6.0\n\n"
+ " capo-contrail-bgpaas:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/capo-contrail-bgpaas.git\n"
+ " ref:\n"
+ " tag: 1.3.0\n\n"
+ " libvirt-metal:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/container-images/libvirt-metal.git\n"
+ " ref:\n"
+ " tag: 0.3.0\n\n"
+ " vault-operator:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://github.com/bank-vaults/vault-operator.git\n"
+ " ref:\n"
+ " tag: v1.23.0\n\n"
+ " sylva-dashboards:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-dashboards.git\n"
+ " ref:\n"
+ " tag: 0.2.0\n\n"
+ " minio-operator:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://github.com/minio/operator.git\n"
+ " ref:\n"
+ " tag: v7.1.1\n\n"
+ " sylva-snmp-resources:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-snmp-resources.git\n"
+ " ref:\n"
+ " tag: 0.2.0\n\n"
+ " sylva-logging-flows:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-logging-flows.git\n"
+ " ref:\n"
+ " tag: 0.2.0\n\n"
+ " sylva-prometheus-rules:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-prometheus-rules.git\n"
+ " ref:\n"
+ " tag: 0.2.1\n\n"
+ " sylva-thanos-rules:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-thanos-rules.git\n"
+ " ref:\n"
+ " tag: 0.3.0\n\n"
+ " sylva-alertmanager-resources:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-alertmanager-resources.git\n"
+ " ref:\n"
+ " tag: 0.2.0\n\n"
+ " workload-team-defs:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://gitlab.com/sylva-projects/sylva-elements/helm-charts/workload-team-defs.git\n"
+ " ref:\n"
+ " tag: 0.4.0\n\n"
+ " bitnami-postgresql:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://github.com/bitnami/charts.git\n"
+ " ref:\n"
+ " tag: postgresql/15.5.36\n\n"
+ " bitnami-postgresql-ha:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://github.com/bitnami/charts.git\n"
+ " ref:\n"
+ " tag: postgresql-ha/14.2.30\n\n"
+ " bitnami-thanos:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://github.com/bitnami/charts.git\n"
+ " ref:\n"
+ " tag: thanos/15.8.0\n\n"
+ " bitnami-redis-cluster:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://github.com/bitnami/charts.git\n"
+ " ref:\n"
+ " tag: redis-cluster/11.0.8\n\n"
+ " logging-chart-repository:\n"
+ " kind: GitRepository\n"
+ " spec:\n"
+ " url: https://github.com/kube-logging/logging-operator.git\n"
+ " ref:\n"
+ " tag: 6.0.2\n\n"
+ "helm_repo_spec_default:\n"
+ " interval: 168h\n"
+ " timeout: 5m\n\n"
+ "# this defines the default .spec for a Kustomization resource\n"
+ "# generated for each item of 'units'\n"
+ "unit_kustomization_spec_default: # default .spec for a Kustomization\n"
+ " force: false\n"
+ " prune: true\n"
+ " interval: 24h\n"
+ " retryInterval: 1m\n"
+ " timeout: 30s\n\n"
+ "# this defines the default .spec for a HelmRelease resource\n"
+ "# generated a unit with a \"helmrelease_spec\" field\n"
+ "unit_helmrelease_spec_default: # default for the .spec of a HelmRelease\n"
+ " driftDetection:\n"
+ " mode: enabled\n"
+ " interval: 24h\n"
+ " # the following dependsOn is used to prevent periodic reconciliation during the upgrades:\n"
+ " # As root-dependency-<n> HelmRelease will be pruned when root-dependency-<n+1> will be\n"
+ " # reconciled, HelmRelease reconciliation for a unit X will be blocked until the Kustomization\n"
+ " # controlling X reconciles and updates the dependsOn to refer to root-dependency-<n+1> HelmRelease\n"
+ " #\n"
+ " # (see also unit.root-dependency below)\n"
+ " dependsOn:\n"
+ " - >-\n"
+ " {{ tuple (dict \"name\" (printf \"root-dependency-%d\" .Release.Revision))\n"
+ " (tuple . \"root-dependency\" | include \"unit-enabled\")\n"
+ " | include \"set-only-if\"\n"
+ " }}\n"
+ " upgrade:\n"
+ " crds: CreateReplace\n"
+ " maxHistory: 2\n\n"
+ "# this defines the default .spec for a Kustomization resource containing the HelmRelease resource\n"
+ "# generated by a unit with a \"helmrelease_spec\" field\n"
+ "unit_helmrelease_kustomization_spec_default:\n"
+ " path: ./kustomize-units/helmrelease-generic\n"
+ " sourceRef:\n"
+ " name: sylva-core\n"
+ " kind: '{{ index .Values.source_templates \"sylva-core\" | dig \"kind\" \"GitRepository\" }}'\n"
+ " wait: true\n\n"
+ "# default value used if units.xxx.enabled is not specified\n"
+ "units_enabled_default: false\n\n"
+ "# unit_template define unit settings\n"
+ "# a unit can inherit from multiple of those\n"
+ "unit_templates:\n"
+ " sylva-units: {} # Empty template that will be overwritten in use-oci-artifacts.values.yaml\n\n"
+ " # dummy unit template is used to have a Kustomization\n"
+ " # to add dependencies\n"
+ " dummy:\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/dummy/base\n"
+ " wait: false\n\n"
+ " namespace-defs:\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/namespace-defs\n"
+ " wait: true\n"
+ " prune: false\n"
+ " _components:\n"
+ " - '{{ tuple \"components/cinder-csi\" (tuple . \"cinder-csi\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/metal3\" (tuple . \"metal3\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/longhorn\" (tuple . \"longhorn\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/harbor\" (tuple . \"harbor\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/rancher\" (tuple . \"rancher\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/sriov\" (tuple . \"sriov-network-operator\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/cattle-monitoring\" (tuple . \"monitoring\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/cattle-fleet\" (.Values._internal | dig \"sylva_mgmt_enabled_units\" \"rancher\" false) | include \"set-only-if\" }}' # effective only in workload clusters\n"
+ " - '{{ tuple \"components/ceph-csi-cephfs\" (tuple . \"ceph-csi-cephfs\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/gitea\" (tuple . \"gitea\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/minio-operator\" (tuple . \"minio-operator\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/minio-monitoring\" (tuple . \"minio-monitoring\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/minio-logging\" (tuple . \"minio-logging\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/thanos\" (tuple . \"thanos\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/loki\" (tuple . \"loki\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/kepler\" (tuple . \"kepler\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/kube-system\" (list \"cabpr\" \"cabpck\" | has .Values.cluster.capi_providers.bootstrap_provider) | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/neuvector\" (tuple . \"neuvector\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/kunai\" (tuple . \"kunai\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/sbom-operator\" (tuple . \"sbom-operator\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/velero\" (tuple . \"velero\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/rancher-turtles\" (tuple . \"rancher-turtles\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/kube-logging\" (tuple . \"logging\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/trivy-operator\" (tuple . \"trivy-operator\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n\n"
+ " flux-common:\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/flux-system/in-cluster\n"
+ " targetNamespace: flux-system\n"
+ " wait: true\n"
+ " # prevent Flux from uninstalling itself\n"
+ " prune: false\n"
+ " _components:\n"
+ " - '{{ tuple \"../components/extra-ca\" .Values.oci_registry_extra_ca_certs | include \"set-only-if\" }}'\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " var_substitution_enabled: \"true\" # To force substitution when configmap does not exist\n"
+ " EXTRA_CA_CERTS: '{{ tuple (.Values.oci_registry_extra_ca_certs | default \"\" | b64enc) .Values.oci_registry_extra_ca_certs | include \"set-only-if\" }}'\n"
+ " KUSTOMIZE_CONCURRENT: '{{ .Values.flux.kustomize.concurrent }}'\n"
+ " KUSTOMIZE_LOG_LEVEL: '{{ .Values.flux.kustomize.log_level }}'\n"
+ " KUSTOMIZE_FEATURES_GATES: '{{ tuple .Values.flux.kustomize.features_gates | include \"dict_to_key_values_separated_string\" }}'\n"
+ " HELM_CONCURRENT: '{{ .Values.flux.helm.concurrent }}'\n"
+ " HELM_LOG_LEVEL: '{{ .Values.flux.helm.log_level }}'\n"
+ " HELM_FEATURES_GATES: '{{ tuple .Values.flux.helm.features_gates | include \"dict_to_key_values_separated_string\" }}'\n"
+ " SOURCE_CONCURRENT: '{{ .Values.flux.source.concurrent }}'\n"
+ " SOURCE_LOG_LEVEL: '{{ .Values.flux.source.log_level }}'\n"
+ " SOURCE_FEATURES_GATES: '{{ tuple .Values.flux.source.features_gates | include \"dict_to_key_values_separated_string\" }}'\n"
+ " SOURCE_STORAGE_CLASS: >-\n"
+ " {{ tuple . (tuple . \"longhorn\" | include \"unit-enabled\") \"single-replica-storageclass\" .Values._internal.default_storage_class | include \"interpret-ternary\" }}\n"
+ " SOURCE_STORAGE_SIZE: 1Gi\n"
+ " substituteFrom:\n"
+ " - kind: ConfigMap\n"
+ " name: proxy-env-vars\n"
+ " optional: true\n\n"
+ " # this unit template gather depends_on common\n"
+ " # to most units\n"
+ " base-deps:\n"
+ " # the actual dependencies aren't the same for bootstrap/management and workload-cluster\n"
+ " # the actual content is found in values files specific to each\n\n"
+ " # this, below, ensures that we avoid having units being updated in parallel\n"
+ " # with a CAPI node creation and/or rolling update:\n"
+ " # - the units that the 'cluster' unit depends on, directly or indirectly, will reconcile first\n"
+ " # - then 'cluster' unit will reconcile\n"
+ " # - then units on which the 'cluster' unit does not depend on, directly or indirectly, will\n"
+ " # reconcile after the \"cluster-machines-ready\" unit is ready\n"
+ " depends_on:\n"
+ " cluster-machines-ready: >-\n"
+ " {{\n"
+ " and (tuple . \"cluster-machines-ready\" | include \"unit-enabled\")\n"
+ " (not (.Values._internal.cluster_machines_ready_unit_deps | has .Values._unit_name_))\n"
+ " }}\n\n"
+ " # this unit template is used to define a unit inheriting from\n"
+ " # kustomize-units/kube-job\n"
+ " kube-job:\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kube-job\n"
+ " wait: true\n"
+ " force: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " JOB_NAME: '{{ .Values._kustomization_name_ }}'\n"
+ " NAMESPACE: '{{ .Release.Namespace }}'\n"
+ " JOB_CHECKSUM: '{{ list .Values .Values._internal.sylva_core_version | toJson | sha256sum }}' # can be overriden to use something more specific\n"
+ " _patches:\n"
+ " # the transformations below are necessary to change the ConfigMap name/namespace\n"
+ " # _because_ to avoid interferences between shell syntax and envsubst, we explicitely\n"
+ " # disable envsubst substitution in this resource (kustomize.toolkit.fluxcd.io/substitute: disabled)\n"
+ " - target:\n"
+ " kind: ConfigMap\n"
+ " labelSelector: kube-job=script\n"
+ " patch: |\n"
+ " - op: replace\n"
+ " path: /metadata/namespace\n"
+ " value: {{ .Values.units | dig .Values._unit_name_ \"kustomization_spec\" \"postBuild\" \"substitute\" \"NAMESPACE\" .Release.Namespace }}\n"
+ " - op: replace\n"
+ " path: /metadata/name\n"
+ " value: kube-job-{{ .Values._kustomization_name_ }}\n"
+ " - target:\n"
+ " kind: Job\n"
+ " patch: |\n"
+ " kind: Job\n"
+ " metadata:\n"
+ " name: _unused_\n"
+ " spec:\n"
+ " template:\n"
+ " spec:\n"
+ " volumes:\n"
+ " - name: script-volume\n"
+ " configMap:\n"
+ " name: kube-job-{{ .Values._kustomization_name_ }}\n\n"
+ " kube-cronjob:\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kube-cronjob\n"
+ " wait: true\n"
+ " force: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " CRONJOB_NAME: '{{ .Values._kustomization_name_ }}'\n"
+ " NAMESPACE: '{{ .Release.Namespace }}'\n"
+ " JOB_CHECKSUM: '{{ list .Values .Values._internal.sylva_core_version | toJson | sha256sum }}' # can be overriden to use something more specific\n"
+ " _patches:\n"
+ " # the transformations below are necessary to change the ConfigMap name/namespace\n"
+ " # _because_ to avoid interferences between shell syntax and envsubst, we explicitely\n"
+ " # disable envsubst substitution in this resource (kustomize.toolkit.fluxcd.io/substitute: disabled)\n"
+ " - target:\n"
+ " kind: ConfigMap\n"
+ " labelSelector: kube-cronjob=script\n"
+ " patch: |\n"
+ " - op: replace\n"
+ " path: /metadata/namespace\n"
+ " value: {{ .Values.units | dig .Values._unit_name_ \"kustomization_spec\" \"postBuild\" \"substitute\" \"NAMESPACE\" .Release.Namespace }}\n"
+ " - op: replace\n"
+ " path: /metadata/name\n"
+ " value: kube-cronjob-{{ .Values._kustomization_name_ }}\n"
+ " - target:\n"
+ " kind: CronJob\n"
+ " patch: |\n"
+ " kind: CronJob\n"
+ " metadata:\n"
+ " name: _unused_\n"
+ " spec:\n"
+ " jobTemplate:\n"
+ " spec:\n"
+ " template:\n"
+ " spec:\n"
+ " volumes:\n"
+ " - name: script-volume\n"
+ " configMap:\n"
+ " name: kube-cronjob-{{ .Values._kustomization_name_ }}\n\n"
+ " backup-s3:\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " S3_HOST: '{{ (.Values.backup | default dict) | dig \"store\" \"s3\" \"host\" \"\" | b64enc }}'\n"
+ " S3_BUCKET: '{{ (.Values.backup | default dict) | dig \"store\" \"s3\" \"bucket\" \"\" | b64enc }}'\n"
+ " S3_REGION: '{{ (.Values.backup | default dict) | dig \"store\" \"s3\" \"region\" \"\" | b64enc }}'\n"
+ " S3_ACCESS_KEY: '{{ (.Values.backup | default dict) | dig \"store\" \"s3\" \"access_key\" \"\" | b64enc }}'\n"
+ " S3_SECRET_KEY: '{{ (.Values.backup | default dict) | dig \"store\" \"s3\" \"secret_key\" \"\" | b64enc }}'\n"
+ " S3_CERT: '{{ (.Values.backup | default dict) | dig \"store\" \"s3\" \"cert\" \"\" }}'\n"
+ " kustomization_spec:\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " NAMESPACE: '{{ .Release.Namespace }}'\n"
+ " _components:\n"
+ " - \"./components/backup-to-s3\"\n"
+ " - '{{ tuple \"./components/backup-to-s3/cert\" (hasKey ((.Values.backup | default dict) | dig \"store\" \"s3\" dict) \"cert\") | include \"set-only-if\" }}'\n\n"
+ " vault-template:\n"
+ " depends_on:\n"
+ " vault-init: true\n"
+ " ingress-nginx: '{{ tuple . \"ingress-nginx\" | include \"unit-enabled\" }}'\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': true\n"
+ " vault-operator: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"Vault UI can be reached at https://{{ .Values.external_hostnames.vault }} ({{ .Values._internal.display_external_ip_msg }})\"\n"
+ " sylvactl/unitTimeout: \"{{ printf \\\"%dm\\\" (mul 5 .Values.cluster.control_plane_replicas) }}\"\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " ADMIN_PASSWORD: '{{ .Values.admin_password }}'\n"
+ " KEY: '{{ .Values.external_certificates.vault.key | default \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/vault\n"
+ " _patches:\n"
+ " # Create a list of ACL and read roles, one per team expected to deploy workload clusters. The loop over .Values.workloadcluster.team is done in the Helm template\n"
+ " - target:\n"
+ " group: vault.banzaicloud.com\n"
+ " version: v1alpha1\n"
+ " kind: Vault\n"
+ " name: vault\n"
+ " # Empty patch is not allowed, hence the if (no workload cluster teams) then dummy patch (which is always true)\n"
+ " patch: |\n"
+ " {{- if .Values.workload_clusters.teams -}}\n"
+ " {{- range $name, $_ := .Values.workload_clusters.teams }}\n"
+ " - op: add\n"
+ " path: /spec/externalConfig/policies/-\n"
+ " value:\n"
+ " name: secret-reader-{{ $name }}\n"
+ " rules: |\n"
+ " path \"secret/data/{{ $name }}/*\" {\n"
+ " capabilities = [ \"read\", \"list\" ]\n"
+ " }\n"
+ " - op: add\n"
+ " path: /spec/externalConfig/policies/-\n"
+ " value:\n"
+ " name: secret-rw-{{ $name }}\n"
+ " rules: |\n"
+ " path \"secret/data/{{ $name }}/*\" {\n"
+ " capabilities = [ \"create\", \"update\", \"delete\", \"read\", \"list\" ]\n"
+ " }\n"
+ " - op: add\n"
+ " path: /spec/externalConfig/auth/0/roles/-\n"
+ " value:\n"
+ " name: secret-reader-{{ $name }}\n"
+ " bound_service_account_names: [secretstore-access-{{ $name }}]\n"
+ " bound_service_account_namespaces: [\"{{ $name }}\"]\n"
+ " policies: [ \"secret-reader-{{ $name }}\"]\n"
+ " ttl: 1h\n"
+ " {{- end}}\n"
+ " {{- else -}}\n"
+ " - op: test\n"
+ " path: /spec/externalConfig/secrets/0/type\n"
+ " value: kv\n"
+ " {{- end -}}\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " VAULT_DNS: '{{ .Values.external_hostnames.vault }}'\n"
+ " VAULT_REPLICAS: '{{ .Values._internal.vault_replicas }}'\n"
+ " MAX_POD_UNAVAILABLE: '{{ int .Values._internal.vault_replicas | eq 1 | ternary 0 1 }}'\n"
+ " AFFINITY: '{{ and (eq (int .Values.cluster.control_plane_replicas) 1) (ne .Values._internal.default_storage_class_unit \"local-path\") | ternary (.Values._internal.vault_no_affinity | toJson | indent 12 ) (.Values._internal.vault_affinity | toJson| indent 12) }}'\n"
+ " SERVICE: vault\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.vault }}'\n"
+ " CERT: '{{ .Values.external_certificates.vault.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}' # CA of the certificate injected to the local Vault\n"
+ " K8S_AUTH_PATH: '{{ .Values.security.vault.paths.k8s }}'\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " _components:\n"
+ " # generate certificate for external communitation\n"
+ " - '{{ ternary \"../tls-components/tls-secret\" \"../tls-components/tls-certificate\" (hasKey .Values.external_certificates.vault \"cert\") }}'\n"
+ " - \"../tls-components/sylva-ca\"\n"
+ " healthChecks: # sometimes this kustomization seems correctly applied while vault pod is not running, see https://gitlab.com/sylva-projects/sylva-core/-/issues/250\n"
+ " # so we replace wait:true by checking for the Vault components health\n"
+ " - apiVersion: apps/v1\n"
+ " kind: StatefulSet\n"
+ " name: vault\n"
+ " namespace: vault\n"
+ " - apiVersion: apps/v1\n"
+ " kind: Deployment\n"
+ " name: vault-operator\n"
+ " namespace: vault\n"
+ " - apiVersion: v1\n"
+ " kind: Service\n"
+ " name: vault-operator\n"
+ " namespace: vault\n"
+ " - apiVersion: v1\n"
+ " kind: Service\n"
+ " name: vault\n"
+ " namespace: vault\n"
+ " - apiVersion: v1\n"
+ " kind: Service\n"
+ " name: vault-configurer\n"
+ " namespace: vault\n"
+ " - apiVersion: apps/v1\n"
+ " kind: Deployment\n"
+ " name: vault-configurer\n"
+ " namespace: vault\n\n"
+ "# unit_definition_defaults is merged with each \"units.x\" before\n"
+ "# it inherits from unit templates\n"
+ "unit_definition_defaults:\n"
+ " # the root-dependency wait job will wait on resources having this label\n"
+ " labels:\n"
+ " sylva-units/root-dependency-wait: \"\"\n"
+ " annotations:\n"
+ " sylva-units-helm-revision: '{{ .Release.Revision }}'\n"
+ " depends_on:\n"
+ " root-dependency: >-\n"
+ " {{\n"
+ " and\n"
+ " (tuple . \"root-dependency\" | include \"unit-enabled\")\n"
+ " (not (.Values._unit_name_ | eq \"root-dependency\"))\n"
+ " }}\n\n"
+ "# this defines Flux Kustomizations and the related ConfigMaps and Secrets\n"
+ "# for Helm-based unit, the Kustomization will produce a Flux HelmRelease\n"
+ "units:\n"
+ " # <unit-name>:\n"
+ " # info: # unit metadata mainly for documentation purpose\n"
+ " # description: <short unit description>\n"
+ " # details: <more detailed data about unit purpose and usage>\n"
+ " # maturity: <level of integration in sylva stack of corresponding component>\n"
+ " # internal: true <for units fully defined in sylva-core/kustomize-units without relying on external resources>\n"
+ " # version: <force declarative version, not recommended>\n"
+ " # enabled: boolean or GoTPL\n"
+ " # repo: <name of the repo under 'source_templates'> (for use with kustomization_spec)\n"
+ " # helm_repo_url: URL of the Helm repo to use (for use with helmrelease_spec, but not mandatory, 'repo' can be used as well to use a git repo)\n"
+ " # labels: (optional) dict holding labels to add to the resources for this unit\n"
+ " # ref_override: optional, if defined, this dict will be used for the GitRepository or OCIRepository overriding spec.ref (not used if some helm_repo_* is set)\n"
+ " # depends_on: dict defining the dependencies of this unit, keys are unit names, values are booleans\n"
+ " # (these dependencies are injected in the unit Kustomization via 'spec.dependsOn')\n"
+ " # suspend: true/false # (optional) if set to true the spec.suspend will be set on the main Flux resource for the unit\n"
+ " # # ie. the Kustomization for a pure-kustomize unit, or the HelmRelease for a Helm-based unit\n"
+ " # helmrelease_spec: # optionnal, contains a partial spec for a FluxCD HelmRelease, all the\n"
+ " # # key things are generated from unit_helmrelease_spec_default\n"
+ " # # and from other fields in the unit definition\n"
+ " # _postRenderers: # this field can be used in this file, it will be merged into user-provided 'postRenderers'\n"
+ " # helm_chart_artifact_name: optional, if specified, when deploying the Helm chart from an OCI artifact,\n"
+ " # helm_chart_artifact_name will be used as chart name instead of helmrelease_spec.chart.spec.chart last path item\n"
+ " # this is required if helmrelease_spec.chart.spec.chart is empty, '.' or '/'\n"
+ " # (also used by tools/oci/push-helm-chart to generate the artifact)\n"
+ " # helm_chart_versions: optional, if specified, when deploying the Helm chart from an OCI artifact or Helm registry,\n"
+ " # it will drive the version to be used from a dict of <version>:<boolean>\n"
+ " # in case if helmrelease_spec.chart.spec.chart.version is not set\n"
+ " # (also used by tools/oci/push-helm-charts-artifacts.py to generate the artifact)\n"
+ " # kustomization_spec: # contains a partial spec for a FluxCD Kustomization, most of the\n"
+ " # # things are generated from unit_kustomization_spec_default\n"
+ " # # sourceRef is generated from .git_repo field\n"
+ " # path: ./path-to-unit-under-repo\n"
+ " # # the final path will hence be:\n"
+ " # # - <git repo template>.spec.url + <unit>.spec.path (if <git repo template> has spec.url defined)\n"
+ " # _patches: # this field can be used in this file, it will be merged into user-provided 'patches'\n"
+ " # _components: # this field can be used in this file, it will be merged into user-provided 'components'\n"
+ " #\n"
+ " # helm_secret_values: # (dict), if set what is put here is injected in HelmRelease.valuesFrom as a Secret\n"
+ " # kustomization_substitute_secrets: # (dict), if set what is put here is injected in Kustomization.postBuild.substituteFrom as a Secret\n"
+ " # unit_templates: optional, list of names of \"unit templates\"\n"
+ " # unit templates are defined under \"unit_templates\"\n"
+ " # the settings for the unit are inherited from the corresponding entries under .Values.unit_templates,\n"
+ " # merging them in the specified order\n"
+ " # one_shot: true/false (default: false)\n"
+ " # This is used for units that need to run only once on a given cluster.\n"
+ " # A unit having \"one_shot: true\" will be automatically disabled if it has already\n"
+ " # run once on an application of sylva-units that ran to completion (until reconciliation\n"
+ " # of sylva-units-status).\n"
+ " # It is typically used for kube-job units:\n"
+ " # * that are needed to apply a given change for a sylva-upgrade\n"
+ " # * or that need to run at cluster creation time, but not after (eg. for metallb\n"
+ " # transitioning from RKE2-managed to Flux-managed)\n"
+ " # NOTE WELL: there is *no* guarantee that a one_shot unit will never be replayed (it will be replayed on a re-run after\n"
+ " # a sylva-units update that didn't complete)\n\n"
+ " root-dependency:\n"
+ " info:\n"
+ " description: special unit ensuring ordered updates of all Kustomizations\n"
+ " details: >-\n"
+ " All Kustomizations will depend on this Kustomization, whose name is `root-dependency-<n>`\n"
+ " and changes at each update of the sylva-units Helm release. This Kustomization does not become\n"
+ " ready before all other Kustomizations have been updated.\n\n"
+ " This unit also manages the `root-dependency-<n>` HelmRelease that acts as a lock to prevent\n"
+ " HelmReleases from reconciling before units they depend on are ready.\n\n"
+ " All this ensures in a race-free way that during an update, units will be reconciled in an\n"
+ " order matching dependency declarations.\n"
+ " internal: true\n"
+ " # Note:\n"
+ " # for this unit, the names of all resources produced must include .Release.Revision\n"
+ " # or another string that is guaranteed to change for each new sylva-units\n"
+ " # Helm release revision (or else the old and new Kustomization will control the same resource,\n"
+ " # which hence might get deleted when the old Kustomization is deleted), see use of .Values._kustomization_name_ below\n"
+ " #\n"
+ " # This unit is *both* a Helm-based unit (to produce the root-dependency-<n> HelmRelease)\n"
+ " # *AND* a Kustomize-based unit (to produce the kube-job running root-dependency-check.sh)\n"
+ " #\n"
+ " # This makes the definition of this unit wild and exotic.\n"
+ " # Don't use this unit to learn sylva-units !\n"
+ " #\n"
+ " unit_templates:\n"
+ " - kube-job\n"
+ " labels:\n"
+ " sylva-units/root-dependency-wait: null # cancel the label set in unit_definition_defaults, because the root-dependency wait job should _not_ wait for this unit\n"
+ " repo: devnull\n"
+ " helm_chart_artifact_name: devnull\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: .\n"
+ " interval: 168h\n"
+ " kustomization_name: root-dependency-{{ .Release.Revision }} # this makes the Kustomization name dynamic\n"
+ " kustomization_spec:\n"
+ " interval: 168h\n"
+ " # we need to specify the Kustomization path or else we'd have kustomize-units/kube-job\n"
+ " # inherited via kube-job unit template ...\n"
+ " path: ./kustomize-units/helmrelease-generic\n"
+ " # .. but then we still need to have the kube-job definition which we inject via a component\n"
+ " _components:\n"
+ " - ../kube-job-as-a-component\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " POD_ACTIVE_DEADLINE_SECONDS: \"120\" # if a single pod run of this Job was to take more time than that, it is probably stuck\n"
+ " # for root-dependency-check.sh kube-job\n"
+ " JOB_CHECKSUM: '{{ .Release.Revision }}'\n"
+ " RUNASUSER: '10000'\n"
+ " RUNASGROUP: '10000'\n"
+ " HELM_VERSION: '{{ .Release.Revision }}'\n"
+ " _patches:\n"
+ " # for root-dependency-check.sh kube-job\n"
+ " - '{{ include \"kube-job-add-env-var-patch\" (dict \"HELM_REVISION\" .Release.Revision )}}'\n"
+ " - '{{ include \"kube-job-replace-script-patch\" (.Files.Get \"scripts/root-dependency-check.sh\") }}'\n"
+ " # for root-dependency-<n> HelmRelease\n"
+ " - target:\n"
+ " kind: HelmRelease\n"
+ " patch: |-\n"
+ " - op: replace\n"
+ " path: /metadata/name\n"
+ " value: root-dependency-{{ .Release.Revision }}\n"
+ " - op: add\n"
+ " path: /metadata/labels/sylva-units.version\n"
+ " value: \"{{ .Release.Revision }}\"\n"
+ " - op: remove\n"
+ " path: /spec/dependsOn\n\n"
+ " validating-admission-policies:\n"
+ " info:\n"
+ " description: configures validating admission policies\n"
+ " internal: true\n"
+ " unit_templates: []\n"
+ " depends_on: {}\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/validating-admission-policies\n"
+ " wait: true\n"
+ " _components:\n"
+ " - '{{ tuple \"components/management-cluster-only\" .Values._internal.mgmt_cluster | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/deny-cabundle-changes\" (tuple . \"cert-manager\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n\n"
+ " flux-system:\n"
+ " info:\n"
+ " description: contains Flux definitions *to manage the Flux system itself via gitops*\n"
+ " details: Note that Flux is always installed on the current cluster as a pre-requisite to installing the chart\n"
+ " maturity: core-component\n"
+ " labels:\n"
+ " sylva-units/protected: \"\"\n"
+ " repo: sylva-core\n"
+ " unit_templates: # we intendedly don't inherit from base-deps, because flux is itself part of base dependencies\n"
+ " - flux-common\n\n"
+ " cert-manager:\n"
+ " info:\n"
+ " description: installs cert-manager, an X.509 certificate controller\n"
+ " maturity: core-component\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " helm_repo_url: https://charts.jetstack.io\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: cert-manager\n"
+ " version: v1.18.2\n"
+ " targetNamespace: cert-manager\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " values:\n"
+ " crds:\n"
+ " enabled: true\n"
+ " http_proxy: '{{ tuple .Values.proxies.http_proxy (hasKey .Values.security.external_x509_issuer \"issuer_type\") | include \"set-only-if\" }}' #proxy setting is required to reach external cert issuers\n"
+ " https_proxy: '{{ tuple .Values.proxies.https_proxy (hasKey .Values.security.external_x509_issuer \"issuer_type\") | include \"set-only-if\" }}'\n"
+ " no_proxy: '{{ tuple .Values.proxies.no_proxy (hasKey .Values.security.external_x509_issuer \"issuer_type\") | include \"set-only-if\" }}'\n"
+ " replicaCount: >-\n"
+ " {{ .Values._internal.ha_cluster.is_ha | ternary 2 1 | include \"preserve-type\" }}\n"
+ " affinity:\n"
+ " podAntiAffinity:\n"
+ " preferredDuringSchedulingIgnoredDuringExecution:\n"
+ " - weight: 100\n"
+ " podAffinityTerm:\n"
+ " labelSelector:\n"
+ " matchLabels:\n"
+ " app.kubernetes.io/instance: cert-manager\n"
+ " app.kubernetes.io/component: controller\n"
+ " topologyKey: kubernetes.io/hostname\n"
+ " podDisruptionBudget:\n"
+ " enabled: true\n"
+ " minAvailable: >-\n"
+ " {{ .Values._internal.ha_cluster.is_ha | ternary 1 0 | include \"preserve-type\" }}\n"
+ " webhook:\n"
+ " replicaCount: >-\n"
+ " {{ .Values._internal.ha_cluster.is_ha | ternary 3 1 | include \"preserve-type\" }}\n"
+ " affinity:\n"
+ " podAntiAffinity:\n"
+ " preferredDuringSchedulingIgnoredDuringExecution:\n"
+ " - weight: 100\n"
+ " podAffinityTerm:\n"
+ " labelSelector:\n"
+ " matchLabels:\n"
+ " app.kubernetes.io/instance: cert-manager\n"
+ " app.kubernetes.io/component: webhook\n"
+ " topologyKey: kubernetes.io/hostname\n"
+ " podDisruptionBudget:\n"
+ " enabled: true\n"
+ " minAvailable: >-\n"
+ " {{ .Values._internal.ha_cluster.is_ha | ternary 1 0 | include \"preserve-type\" }}\n"
+ " cainjector:\n"
+ " replicaCount: >-\n"
+ " {{ .Values._internal.ha_cluster.is_ha | ternary 2 1 | include \"preserve-type\" }}\n"
+ " affinity:\n"
+ " podAntiAffinity:\n"
+ " preferredDuringSchedulingIgnoredDuringExecution:\n"
+ " - weight: 100\n"
+ " podAffinityTerm:\n"
+ " labelSelector:\n"
+ " matchLabels:\n"
+ " app.kubernetes.io/instance: cert-manager\n"
+ " app.kubernetes.io/component: cainjector\n"
+ " topologyKey: kubernetes.io/hostname\n"
+ " podDisruptionBudget:\n"
+ " enabled: true\n"
+ " minAvailable: >-\n"
+ " {{ .Values._internal.ha_cluster.is_ha | ternary 1 0 | include \"preserve-type\" }}\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - '{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include \"preserve-type\" }}'\n\n"
+ " kube-storage-version-migrator:\n"
+ " enabled: false\n"
+ " info:\n"
+ " description: installs kube-storage-version-migrator to assist apiVersion migrations\n"
+ " maturity: beta\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kube-storage-version-migrator\n"
+ " wait: true\n\n"
+ " trivy-operator:\n"
+ " info:\n"
+ " description: installs Trivy operator\n"
+ " maturity: beta\n"
+ " enabled: no\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " helm_repo_url: https://aquasecurity.github.io/helm-charts/\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: trivy-operator\n"
+ " version: 0.30.0\n"
+ " targetNamespace: trivy-system\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " values:\n"
+ " image:\n"
+ " registry: docker.io\n"
+ " repository: aquasec/trivy-operator\n"
+ " securityContext:\n"
+ " runAsNonRoot: true\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " privileged: false\n"
+ " readOnlyRootFilesystem: true\n"
+ " runAsGroup: 10000\n"
+ " runAsUser: 10000\n"
+ " serviceAccount:\n"
+ " annotations: {}\n"
+ " create: true\n"
+ " name: trivy-operator\n"
+ " operator:\n"
+ " builtInTrivyServer: true\n"
+ " httpProxy: '{{ .Values.proxies.https_proxy }}'\n"
+ " httpsProxy: '{{ .Values.proxies.https_proxy }}'\n"
+ " noProxy: '{{ printf \"%s,%s\" (include \"sylva-units.no_proxy\" (tuple .)) \"trivy-service.trivy-system\" }}'\n"
+ " trivy:\n"
+ " image:\n"
+ " registry: ghcr.io\n"
+ " repository: aquasecurity/trivy\n"
+ " httpProxy: '{{ .Values.proxies.https_proxy }}'\n"
+ " httpsProxy: '{{ .Values.proxies.https_proxy }}'\n"
+ " noProxy: '{{ printf \"%s,%s\" (include \"sylva-units.no_proxy\" (tuple .)) \"trivy-service.trivy-system\" }}'\n"
+ " severity: UNKNOWN,HIGH,CRITICAL\n"
+ " dbRegistry: ghcr.io\n"
+ " dbRepository: aquasecurity/trivy-db\n"
+ " javaDbRegistry: ghcr.io\n"
+ " javaDbRepository: aquasecurity/trivy-java-db\n"
+ " insecureRegistries: '{{ .Values.security.trivy_operator.insecure_registries | include \"set-if-defined\" }}'\n"
+ " registry:\n"
+ " mirror: '{{ .Values.security.trivy_operator.mirrors | include \"set-if-defined\" }}'\n"
+ " policiesBundle:\n"
+ " registry: ghcr.io\n"
+ " repository: aquasecurity/trivy-checks\n"
+ " compliance:\n"
+ " specs:\n"
+ " - '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" | ternary \"rke2-cis-1.24\" \"k8s-cis-1.23\" }}'\n"
+ " nodeCollector:\n"
+ " volumes: '{{ .Values._internal.node_collector_volumes | include \"preserve-type\" }}'\n"
+ " volumeMounts: '{{ .Values._internal.node_collector_volume_mounts | include \"preserve-type\" }}'\n"
+ " trivyOperator:\n"
+ " scanJobPodTemplatePodSecurityContext:\n"
+ " runAsGroup: 10000\n"
+ " runAsUser: 10000\n"
+ " scanJobPodTemplateContainerSecurityContext:\n"
+ " runAsNonRoot: true\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " privileged: false\n"
+ " readOnlyRootFilesystem: true\n"
+ " runAsGroup: 10000\n"
+ " runAsUser: 10000\n\n"
+ " sylva-ca:\n"
+ " info:\n"
+ " description: configures the Certificate Authority for units of the Sylva stack\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " VAULT_TOKEN: '{{ .Values.security.external_x509_issuer.vault_token | default \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/sylva-ca\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " CA_CHAIN: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " ISSUER_SERVER: '{{ .Values.security.external_x509_issuer.server }}'\n"
+ " VAULT_PATH: '{{ .Values.security.external_x509_issuer.vault_path }}'\n"
+ " _patches:\n"
+ " - target:\n"
+ " kind: ClusterSecretStore\n"
+ " name: eso-store-k8s-cert-manager\n"
+ " patch: |\n"
+ " {{- if .Values.workload_clusters.teams -}}\n"
+ " {{- range $name, $_ := .Values.workload_clusters.teams }}\n"
+ " - op: add\n"
+ " path: /spec/conditions/0/namespaces/-\n"
+ " value:\n"
+ " {{ $name }}\n"
+ " {{- end -}}\n"
+ " {{- else -}}\n"
+ " - op: test\n"
+ " path: /kind\n"
+ " value: ClusterSecretStore\n"
+ " {{- end -}}\n\n\n\n"
+ " namespace-defs:\n"
+ " info:\n"
+ " description: creates sylva-system namespace and other namespaces to be used by various units\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - namespace-defs\n"
+ " depends_on:\n"
+ " # namespace-defs can't depend on Kyverno, because Kyverno depends on\n"
+ " # some namespaces\n"
+ " kyverno: false\n"
+ " kyverno-policies: false\n\n"
+ " cnpg-operator:\n"
+ " info:\n"
+ " description: Cloud Native PostgreSQL (CNPG) Operator\n"
+ " maturity: stable\n"
+ " enabled_conditions:\n"
+ " - '{{ or (tuple . \"keycloak\" | include \"unit-enabled\")\n"
+ " (tuple . \"harbor\" | include \"unit-enabled\")\n"
+ " (tuple . \"gitea\" | include \"unit-enabled\")\n"
+ " (tuple . \"kunai\" | include \"unit-enabled\")\n"
+ " }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " helm_repo_url: https://cloudnative-pg.github.io/charts\n"
+ " helmrelease_spec:\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: cloudnative-pg\n"
+ " version: 0.26.0\n"
+ " targetNamespace: cnpg-system\n\n"
+ " metrics-server-ha:\n"
+ " info:\n"
+ " description: metrics-server configmap for ha values\n"
+ " maturity: stable\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" }}'\n"
+ " - '{{ .Values._internal.ha_cluster.is_ha }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " targetNamespace: '{{ .Release.Namespace }}'\n"
+ " path: ./kustomize-units/metrics-server-ha\n"
+ " wait: true\n\n"
+ " metrics-server:\n"
+ " info:\n"
+ " description: metrics-server install\n"
+ " maturity: stable\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" }}'\n"
+ " depends_on:\n"
+ " metrics-server-ha: '{{ .Values._internal.ha_cluster.is_ha }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " helm_repo_url: https://rke2-charts.rancher.io\n"
+ " helm_chart_versions:\n"
+ " 3.12.203: >-\n"
+ " {{ include \"k8s-version-match\" (tuple \">=1.30.0,<1.31.0\" .Values._internal.k8s_version) }}\n"
+ " 3.13.001: >-\n"
+ " {{ include \"k8s-version-match\" (tuple \">=1.31.0\" .Values._internal.k8s_version) }}\n"
+ " helmrelease_spec:\n"
+ " driftDetection:\n"
+ " ignore:\n"
+ " - target:\n"
+ " kind: APIService\n"
+ " name: v1beta1.metrics.k8s.io\n"
+ " paths:\n"
+ " - /metadata/annotations/meta.helm.sh~1release-name\n"
+ " - /metadata/labels/app.kubernetes.io~1instance\n"
+ " - /metadata/labels/helm.toolkit.fluxcd.io~1name\n"
+ " - /metadata/labels/helm.toolkit.fluxcd.io~1namespace\n"
+ " - /spec/service/name\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: rke2-metrics-server\n"
+ " version: \"\"\n"
+ " targetNamespace: kube-system\n"
+ " values:\n"
+ " nodeSelector:\n"
+ " kubernetes.io/os: linux\n"
+ " node-role.kubernetes.io/control-plane: \"true\"\n"
+ " valuesFrom: >-\n"
+ " {{\n"
+ " tuple (list (dict \"kind\" \"ConfigMap\"\n"
+ " \"name\" \"metrics-server-ha-values\"))\n"
+ " .Values._internal.ha_cluster.is_ha\n"
+ " | include \"set-only-if\"\n"
+ " }}\n\n"
+ " keycloak-postgres:\n"
+ " info:\n"
+ " description: \"[Deprecated] Deploy Postgres cluster for Keycloak using Cloud Native PostgreSQL (CNPG)\"\n"
+ " details: >\n"
+ " Legacy Unit that used to deploy Postgres cluster for Keycloak using Cloud Native PostgreSQL (CNPG)\n"
+ " This unit will be removed once all deployments will be migrated to the new keycloak-postgresql (sylva 1.6+)\n"
+ " maturity: stable\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " # Enable this unit only if required to transition from 'keycloak-postgres' (historic unit with old settings) to 'keycloak-postgresql', that is:\n"
+ " # - If 'keycloak-postgres' unit was already successfully installed before\n"
+ " - '{{ lookup \"kustomize.toolkit.fluxcd.io/v1\" \"Kustomization\" \"sylva-system\" \"keycloak-postgres\" | dig \"status\" \"observedGeneration\" -1 | ne -1 }}'\n"
+ " # - Only if the reconciliation of 'keycloak-postgresql' (new unit) has never completed yet, meaning that the migration is not finished\n"
+ " - '{{ lookup \"kustomize.toolkit.fluxcd.io/v1\" \"Kustomization\" \"sylva-system\" \"keycloak-postgresql\" | dig \"status\" \"observedGeneration\" -1 | eq -1 }}'\n"
+ " repo: sylva-core\n"
+ " depends_on:\n"
+ " cnpg-operator: true\n"
+ " keycloak-init: true\n"
+ " annotations:\n"
+ " sylvactl/unitTimeout: 15m\n"
+ " kustomization_spec:\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " KEYCLOAK_POSTGRES_REPLICAS: '{{ .Values._internal.ha_cluster.is_ha | ternary 3 1 }}'\n"
+ " storageClass: '{{ .Values._internal.default_storage_class }}'\n"
+ " podAntiAffinityType: '{{ .Values._internal.ha_cluster.is_ha | ternary \"required\" \"preferred\" }}'\n"
+ " path: ./kustomize-units/keycloak-postgres\n"
+ " _components:\n"
+ " - '{{ tuple \"./components/ha\" (.Values._internal.ha_cluster.is_ha) | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"./components/keycloak-upgrade-db\" (and .Values._internal.state.is_upgrade (tuple . \"keycloak\" | include \"unit-enabled\")) | include \"set-only-if\" }}'\n"
+ " # Add WAL dedicated PVC only if it was previously defined, in order to prevent this postgresql cluster from being reconfigured\n"
+ " # when sylva is upgraded from 1.4, just before this unit will be replaced by keycloak-postgres one.\n"
+ " - >-\n"
+ " {{- if (lookup \"apiextensions.k8s.io/v1\" \"CustomResourceDefinition\" \"\" \"clusters.postgresql.cnpg.io\") }}\n"
+ " {{- tuple\n"
+ " \"./components/wal-pvc\"\n"
+ " (lookup \"postgresql.cnpg.io/v1\" \"Cluster\" \"keycloak\" \"cnpg-keycloak\" | dig \"spec\" \"walStorage\" \"size\" \"\" | eq \"2Gi\")\n"
+ " | include \"set-only-if\"\n"
+ " }}\n"
+ " {{- else }}\n"
+ " {{- tuple \"\" false | include \"set-only-if\" }}\n"
+ " {{- end }}\n"
+ " healthChecks:\n"
+ " - apiVersion: postgresql.cnpg.io/v1\n"
+ " kind: Cluster\n"
+ " name: cnpg-keycloak\n"
+ " namespace: keycloak\n"
+ " healthCheckExprs:\n"
+ " # CNPG does not expose a kstatus compatible status (missing observedGeneration), we use status.phase instead\n"
+ " # It seems to report an accurate view, see https://github.com/cloudnative-pg/cloudnative-pg/blob/v1.27.0/api/v1/cluster_types.go#L642\n"
+ " - apiVersion: postgresql.cnpg.io/v1\n"
+ " kind: Cluster\n"
+ " current: status.phase == \"Cluster in healthy state\"\n"
+ " failed: status.phase != \"Cluster in healthy state\"\n\n"
+ " kubevirt:\n"
+ " info:\n"
+ " description: installs kubevirt\n"
+ " maturity: beta\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " helm_repo_url: https://suse-edge.github.io/charts\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: kubevirt\n"
+ " version: 0.6.0\n"
+ " targetNamespace: kubevirt\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " values:\n"
+ " kubevirt:\n"
+ " configuration:\n"
+ " vmRolloutStrategy: \"LiveUpdate\"\n"
+ " developerConfiguration:\n"
+ " featureGates:\n"
+ " - NUMA\n"
+ " - CPUManager\n"
+ " - Snapshot\n"
+ " - ExpandDisks\n"
+ " - VMExport\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - '{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include \"preserve-type\" }}'\n\n"
+ " kubevirt-test-vms-remove:\n"
+ " info:\n"
+ " description: remove kubevirt test vms before upgrade\n"
+ " internal: true\n"
+ " one_shot: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - kube-job\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"kubevirt-test-vms\" | include \"unit-enabled\" }}'\n"
+ " - '{{ .Values._internal.state.is_upgrade }}'\n"
+ " kustomization_spec:\n"
+ " targetNamespace: kubevirt-tests\n"
+ " _patches:\n"
+ " - '{{ include \"kube-job-replace-script-patch\" (.Files.Get \"scripts/kubevirt-delete-vm.sh\") }}'\n\n"
+ " kubevirt-test-vms:\n"
+ " info:\n"
+ " description: deploys kubevirt VMs for testing\n"
+ " internal: true\n"
+ " test: true\n"
+ " enabled_conditions:\n"
+ " - '{{ gt (int .Values._internal.worker_node_count) 0 }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " kubevirt: true\n"
+ " multus: true\n"
+ " kubevirt-test-vms-remove: '{{ tuple . \"kubevirt-test-vms-remove\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kubevirt-test-vms\n"
+ " wait: true\n"
+ " targetNamespace: kubevirt-tests\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " CPU_CORE: '1'\n"
+ " REQUESTED_MEMORY: '128Mi'\n"
+ " # renovate: datasource=docker\n"
+ " IMAGE: quay.io/kubevirt/cirros-container-disk-demo:v1.6.1\n"
+ " CLOUD_INIT_NO_CLOUD: SGkuXG4=\n\n"
+ " kubevirt-manager-deployment-remove:\n"
+ " info:\n"
+ " description: remove kubevirt manager deployment before upgrade\n"
+ " internal: true\n"
+ " one_shot: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - kube-job\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"kubevirt-manager\" | include \"unit-enabled\" }}'\n"
+ " - '{{ .Values._internal.state.is_upgrade }}'\n"
+ " kustomization_spec:\n"
+ " targetNamespace: kubevirt-manager\n"
+ " _patches:\n"
+ " - '{{ include \"kube-job-replace-script-patch\" (.Files.Get \"scripts/kubevirt-manager-deployment-delete.sh\") }}'\n\n"
+ " kubevirt-manager:\n"
+ " info:\n"
+ " description: deploys kubevirt-manager UI for kubevirt workloads\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"kubevirt\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " ingress-nginx: '{{ tuple . \"ingress-nginx\" | include \"unit-enabled\" }}'\n"
+ " kubevirt-manager-deployment-remove: '{{ tuple . \"kubevirt-manager-deployment-remove\" | include \"unit-enabled\" }}'\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " ADMIN_CREDENTIAL: '{{ (htpasswd .Values._internal.kubevirt_admin_user .Values._internal.kubevirt_admin_password) | b64enc | quote }}'\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"Kubevirt manager UI can be reached at https://{{ .Values.external_hostnames.kubevirt_manager }} ({{ .Values._internal.display_external_ip_msg }})\"\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kubevirt-manager\n"
+ " wait: true\n"
+ " targetNamespace: kubevirt-manager\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " SERVICE: kubevirt-manager\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.kubevirt_manager }}'\n"
+ " CERTIFICATE_NAMESPACE: kubevirt-manager\n"
+ " CERT: '{{ .Values.external_certificates.kubevirt_manager.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " _components:\n"
+ " - '{{ ternary \"../tls-components/tls-secret\" \"../tls-components/tls-certificate\" (hasKey .Values.external_certificates.kubevirt_manager \"cert\") }}'\n"
+ " - \"../tls-components/sylva-ca\"\n"
+ " _patches:\n"
+ " - target:\n"
+ " kind: Deployment\n"
+ " patch: |\n"
+ " - op: replace\n"
+ " path: /spec/template/spec/containers/0/securityContext\n"
+ " value:\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " privileged: false\n"
+ " runAsNonRoot: true\n"
+ " runAsGroup: 30000\n"
+ " runAsUser: 10000\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n\n"
+ " kubevirt-cdi:\n"
+ " info:\n"
+ " description: manages Kubevirt CDI - Container Data Importer\n"
+ " maturity: beta\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"kubevirt\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " helm_repo_url: https://suse-edge.github.io/charts\n"
+ " helm_chart_artifact_name: kubevirt-cdi\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: cdi\n"
+ " version: 0.4.0\n"
+ " targetNamespace: kubevirt-cdi\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " values:\n"
+ " cdi:\n"
+ " config:\n"
+ " featureGates:\n"
+ " - HonorWaitForFirstConsumer\n"
+ " importProxy:\n"
+ " HTTPProxy: '{{ get .Values.proxies \"http_proxy\" }}'\n"
+ " HTTPSProxy: '{{ get .Values.proxies \"https_proxy\" }}'\n"
+ " noProxy: '{{ include \"sylva-units.no_proxy\" (tuple .) }}'\n\n"
+ " harbor-init:\n"
+ " info:\n"
+ " description: sets up Harbor prerequisites\n"
+ " details: it generates namespace, certificate, admin password, OIDC configuration\n"
+ " internal: true\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " vault-init: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"harbor\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " KEY: '{{ .Values.external_certificates.harbor.key | default \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/harbor-init\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " HARBOR_DNS: '{{ .Values.external_hostnames.harbor }}'\n"
+ " KEYCLOAK_DNS: '{{ .Values.external_hostnames.keycloak }}'\n"
+ " SERVICE: harbor\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.harbor }}'\n"
+ " CERTIFICATE_NAMESPACE: harbor\n"
+ " CERT: '{{ .Values.external_certificates.harbor.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " K8S_AUTH_PATH: '{{ .Values.security.vault.paths.k8s }}'\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " VAULT_API: '{{ .Values.security.vault.external_vault_url | include \"set-if-defined\" }}'\n"
+ " _components:\n"
+ " - '{{ ternary \"../tls-components/tls-secret\" \"../tls-components/tls-certificate\" (hasKey .Values.external_certificates.harbor \"cert\") }}'\n"
+ " - \"../tls-components/sylva-ca\"\n\n"
+ " harbor-postgres:\n"
+ " info:\n"
+ " description: installs Postgresql for Harbor\n"
+ " maturity: stable\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " harbor-init: '{{ tuple . \"harbor-init\" | include \"unit-enabled\" }}' # conditional, because in workload clusters harbor-init isn't used\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"harbor\" | include \"unit-enabled\" }}'\n"
+ " repo: bitnami-postgresql\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: bitnami/postgresql\n"
+ " targetNamespace: harbor\n"
+ " values:\n"
+ " image:\n"
+ " repository: bitnamilegacy/postgresql\n"
+ " metrics:\n"
+ " image:\n"
+ " repository: bitnamilegacy/postgres-exporter\n"
+ " volumePermissions:\n"
+ " image:\n"
+ " repository: bitnamilegacy/os-shell\n"
+ " auth:\n"
+ " username: harbor\n"
+ " database: harbor\n"
+ " existingSecret: harbor-postgres-secrets\n"
+ " secretKeys:\n"
+ " adminPasswordKey: admin-password\n"
+ " userPasswordKey: password\n"
+ " replicationPasswordKey: replication-password\n"
+ " architecture: replication\n"
+ " nameOverride: postgres\n"
+ " serviceAccount:\n"
+ " create: true\n"
+ " name: postgresql\n"
+ " primary:\n"
+ " pdb:\n"
+ " create: false # this is non-HA and single replica, so a PDB does not make sense\n"
+ " extendedConfiguration: |-\n"
+ " huge_pages = off\n"
+ " initdb:\n"
+ " args: \"--set huge_pages=off\"\n"
+ " readReplicas:\n"
+ " extendedConfiguration: |-\n"
+ " huge_pages = off\n"
+ " replicaCount: '{{ .Values._internal.ha_cluster.is_ha | ternary 3 1 }}'\n"
+ " affinity:\n"
+ " podAntiAffinity:\n"
+ " requiredDuringSchedulingIgnoredDuringExecution:\n"
+ " - labelSelector:\n"
+ " matchLabels:\n"
+ " app.kubernetes.io/name: postgres\n"
+ " app.kubernetes.io/component: read\n"
+ " topologyKey: kubernetes.io/hostname\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - '{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include \"preserve-type\" }}'\n\n"
+ " harbor:\n"
+ " info:\n"
+ " description: installs Harbor\n"
+ " maturity: beta\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " ingress-nginx: '{{ tuple . \"ingress-nginx\" | include \"unit-enabled\" }}'\n"
+ " harbor-init: '{{ tuple . \"harbor-init\" | include \"unit-enabled\" }}'\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': true\n"
+ " harbor-postgres: true\n"
+ " monitoring-crd: '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"Harbor UI can be reached at https://{{ .Values.external_hostnames.harbor }} ({{ .Values._internal.display_external_ip_msg }})\"\n"
+ " helm_repo_url: https://helm.goharbor.io\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: harbor\n"
+ " version: 1.17.1\n"
+ " targetNamespace: harbor\n"
+ " values:\n"
+ " updateStrategy:\n"
+ " type: '{{ eq .Values._internal.harbor_storage_access_mode \"ReadWriteMany\" | ternary \"RollingUpdate\" \"Recreate\" }}'\n"
+ " persistence:\n"
+ " enabled: true\n"
+ " resourcePolicy: \"keep\"\n"
+ " persistentVolumeClaim:\n"
+ " registry:\n"
+ " storageClass: '{{ .Values._internal.default_storage_class }}'\n"
+ " #size: 32Gi\n"
+ " accessMode: '{{ .Values._internal.harbor_storage_access_mode }}'\n"
+ " jobservice:\n"
+ " jobLog:\n"
+ " storageClass: '{{ .Values._internal.default_storage_class }}'\n"
+ " #size: 8Gi\n"
+ " accessMode: '{{ .Values._internal.harbor_storage_access_mode }}'\n"
+ " redis:\n"
+ " storageClass: '{{ .Values._internal.default_storage_class }}'\n"
+ " #size: 8Gi\n"
+ " #trivy:\n"
+ " #storageClass: '{{ .Values._internal.default_storage_class }}'\n"
+ " #size: 8Gi\n"
+ " externalURL: 'https://{{ .Values.external_hostnames.harbor }}'\n"
+ " existingSecretAdminPassword: '{{ tuple \"harbor-init\" (tuple . \"harbor-init\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " expose:\n"
+ " ingress:\n"
+ " enabled: true\n"
+ " className: nginx\n"
+ " hosts:\n"
+ " core: '{{ .Values.external_hostnames.harbor }}'\n"
+ " tls:\n"
+ " enabled: true\n"
+ " certSource: secret\n"
+ " secret:\n"
+ " secretName: harbor-tls\n"
+ " database:\n"
+ " type: external\n"
+ " external:\n"
+ " host: harbor-postgres-primary.harbor.svc.cluster.local\n"
+ " port: \"5432\"\n"
+ " username: \"harbor\"\n"
+ " coreDatabase: \"harbor\"\n"
+ " existingSecret: \"harbor-postgres-secrets\"\n"
+ " notary:\n"
+ " enabled: false\n"
+ " trivy:\n"
+ " enabled: false\n"
+ " proxy:\n"
+ " httpProxy: '{{ get .Values.proxies \"http_proxy\" }}'\n"
+ " httpsProxy: '{{ get .Values.proxies \"https_proxy\" }}'\n"
+ " noProxy: '{{ include \"sylva-units.no_proxy\" (tuple .) }}'\n"
+ " metrics:\n"
+ " enabled: true\n"
+ " serviceMonitor:\n"
+ " enabled: '{{ .Values._internal.monitoring.enabled | include \"preserve-type\" }}'\n"
+ " install:\n"
+ " createNamespace: true\n\n"
+ " vault-init:\n"
+ " info:\n"
+ " description: creates vault namespace\n"
+ " details: this unit creates the requirements to deploy vault\n"
+ " internal: true\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"vault\" | include \"unit-enabled\" }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/namespace-defs/baseline-namespace\n"
+ " targetNamespace: vault\n"
+ " wait: true\n\n"
+ " vault-operator:\n"
+ " info:\n"
+ " description: installs Vault operator\n"
+ " maturity: stable\n"
+ " depends_on:\n"
+ " vault-init: true\n"
+ " repo: vault-operator\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: deploy/charts/vault-operator\n"
+ " targetNamespace: vault\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " values:\n"
+ " image:\n"
+ " repository: ghcr.io/bank-vaults/vault-operator\n"
+ " tag: '{{ .Values.source_templates | dig \"vault-operator\" \"spec\" \"ref\" \"tag\" \"\" | required \"source_templates.vault-operator.spec.ref.tag is unset\" }}'\n\n"
+ " openbao-set-service-label:\n"
+ " info:\n"
+ " description: Kyverno policy to add the vault-active pod label according to the presence of the openbao-active pod label\n"
+ " details: |\n"
+ " This policy set the vault-active pod label on the openbao pods according to the presence of the openbao-active pod label, so\n"
+ " that the vault service created by the vault-operator can select the active pod instance.\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"kyverno\" | include \"unit-enabled\" }}'\n"
+ " - '{{ .Values.security.secret_manager.variant | eq \"openbao\" }}'\n"
+ " depends_on:\n"
+ " kyverno: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kyverno-policies/openbao-set-service-label\n"
+ " wait: true\n\n"
+ " openbao:\n"
+ " info:\n"
+ " description: installs Openbao\n"
+ " details: |\n"
+ " Openbao assumes that the certificate vault-tls has been issued\n"
+ " maturity: stable\n"
+ " # renovate: datasource=docker depname=ghcr.io/openbao/openbao\n"
+ " version: 2.4.1\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - vault-template\n"
+ " depends_on:\n"
+ " # in addition to the dependencies defined in vault-template\n"
+ " openbao-set-service-label: '{{ tuple . \"openbao-set-service-label\" | include \"unit-enabled\" }}'\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.security.secret_manager.variant | eq \"openbao\" }}'\n"
+ " kustomization_spec:\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " VAULT_IMAGE: ghcr.io/openbao/openbao:{{ .Values.units | dig \"openbao\" \"info\" \"version\" \"undefined\" }}\n"
+ " _patches:\n"
+ " - patch: |\n"
+ " apiVersion: vault.banzaicloud.com/v1alpha1\n"
+ " kind: Vault\n"
+ " metadata:\n"
+ " name: vault\n"
+ " spec:\n"
+ " securityContext:\n"
+ " fsGroup: 1000\n"
+ " runAsGroup: 1000\n"
+ " runAsUser: 100\n"
+ " configPath: /openbao/config\n"
+ " - target:\n"
+ " kind: Vault\n"
+ " name: vault\n"
+ " patch: |\n"
+ " - op: add\n"
+ " path: /spec/vaultEnvsConfig/-\n"
+ " value:\n"
+ " name: BAO_K8S_POD_NAME\n"
+ " value: $(POD_NAME)\n"
+ " - op: add\n"
+ " path: /spec/vaultEnvsConfig/-\n"
+ " value:\n"
+ " name: BAO_CACERT\n"
+ " value: /vault/tls/ca.crt\n"
+ " - op: add\n"
+ " path: /spec/vaultEnvsConfig/-\n"
+ " value:\n"
+ " name: BAO_CLUSTER_ADDR\n"
+ " value: http://$(POD_NAME):8201\n"
+ " - op: add\n"
+ " path: /spec/vaultEnvsConfig/-\n"
+ " value:\n"
+ " name: SKIP_CHOWN\n"
+ " value: \"true\"\n"
+ " - op: add\n"
+ " path: /spec/vaultLabels/variant\n"
+ " value: openbao\n"
+ " - op: remove\n"
+ " path: /spec/securityContext/runAsNonRoot\n\n"
+ " hashicorp-vault:\n"
+ " info:\n"
+ " description: installs Hashicorp Vault\n"
+ " details: |\n"
+ " Vault assumes that the certificate vault-tls has been issued\n"
+ " maturity: stable\n"
+ " # renovate: datasource=docker depname=hashicorp/vault\n"
+ " version: 1.13.13\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - vault-template\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.security.secret_manager.variant | eq \"vault\" }}'\n"
+ " kustomization_spec:\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " VAULT_IMAGE: hashicorp/vault:{{ .Values.units | dig \"hashicorp-vault\" \"info\" \"version\" \"undefined\" }}\n\n"
+ " vault:\n"
+ " info:\n"
+ " description: ensures that a secret store is installed\n"
+ " details: |\n"
+ " either hashicorp-vault or openbao is installed\n"
+ " maturity: stable\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - dummy\n"
+ " depends_on:\n"
+ " hashicorp-vault: '{{ .Values.security.secret_manager.variant | eq \"vault\" }}'\n"
+ " openbao: '{{ .Values.security.secret_manager.variant | eq \"openbao\" }}'\n\n"
+ " vault-config-operator:\n"
+ " info:\n"
+ " description: installs Vault config operator\n"
+ " maturity: stable\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " vault-init: true\n"
+ " cert-manager: true\n"
+ " monitoring: '{{ .Values.units | dig \"vault-config-operator\" \"helmrelease_spec\" \"values\" \"enableMonitoring\" true }}'\n"
+ " helm_repo_url: https://redhat-cop.github.io/vault-config-operator\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: vault-config-operator\n"
+ " version: v0.8.33\n"
+ " targetNamespace: vault\n"
+ " values:\n"
+ " enableCertManager: true\n"
+ " enableMonitoring: false\n\n"
+ " vault-secrets:\n"
+ " info:\n"
+ " description: generates random secrets in vault, configure password policy, authentication backends, etc...\n"
+ " internal: true\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " vault: true\n"
+ " vault-config-operator: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/vault-secrets\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " K8S_AUTH_PATH: '{{ .Values.security.vault.paths.k8s }}'\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " VAULT_API: '{{ .Values.security.vault.external_vault_url | include \"set-if-defined\" }}'\n"
+ " _components:\n"
+ " - '{{ tuple \"components/keycloak\" (tuple . \"keycloak\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n\n"
+ " vault-oidc:\n"
+ " info:\n"
+ " description: configures Vault to be used with OIDC\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " keycloak-resources: true\n"
+ " vault: true\n"
+ " vault-config-operator: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/vault-oidc\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " VAULT_DNS: '{{ .Values.external_hostnames.vault }}'\n"
+ " KEYCLOAK_DNS: '{{ .Values.external_hostnames.keycloak }}'\n"
+ " K8S_AUTH_PATH: '{{ .Values.security.vault.paths.k8s }}'\n\n"
+ " external-secrets-operator:\n"
+ " info:\n"
+ " description: installs the External Secrets operator\n"
+ " maturity: stable\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " helm_repo_url: https://charts.external-secrets.io\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: external-secrets\n"
+ " version: 0.20.1\n"
+ " targetNamespace: external-secrets\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " values:\n"
+ " installCRDs: true\n\n"
+ " eso-secret-stores:\n"
+ " info:\n"
+ " description: defines External Secrets stores\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " external-secrets-operator: true\n"
+ " vault: true\n"
+ " keycloak-init: '{{tuple . \"keycloak\" | include \"unit-enabled\" }}' # keycloak namespace required for keycloak component\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/eso-secret-stores\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " K8S_AUTH_PATH: '{{ .Values.security.vault.paths.k8s }}'\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " VAULT_API: '{{ .Values.security.vault.external_vault_url | include \"set-if-defined\" }}'\n"
+ " wait: true\n"
+ " _components:\n"
+ " - '{{ tuple \"components/keycloak\" (tuple . \"keycloak\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n\n"
+ " cis-operator-crd:\n"
+ " info:\n"
+ " description: install CIS operator CRDs\n"
+ " maturity: stable\n"
+ " hidden: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"cis-operator\" | include \"unit-enabled\" }}'\n"
+ " helm_repo_url: https://charts.rancher.io\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: rancher-cis-benchmark-crd\n"
+ " version: 106.0.0+up8.0.0\n"
+ " targetNamespace: cis-operator-system\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " kustomization_spec:\n"
+ " prune: false\n\n"
+ " cis-operator:\n"
+ " info:\n"
+ " description: install CIS operator\n"
+ " maturity: stable\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" }}'\n"
+ " depends_on:\n"
+ " cis-operator-crd: true\n"
+ " helm_repo_url: https://charts.rancher.io\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: rancher-cis-benchmark\n"
+ " version: 106.0.0+up8.0.0\n"
+ " targetNamespace: cis-operator-system\n\n"
+ " cis-operator-scan:\n"
+ " info:\n"
+ " description: allows for running a CIS scan for management cluster\n"
+ " details: |\n"
+ " it generates a report which can be viewed and downloaded in CSV from the Rancher UI, at https://rancher.sylva/dashboard/c/local/cis/cis.cattle.io.clusterscan\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"cis-operator\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " cis-operator: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/cis-operator-scan\n"
+ " wait: false\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " SCAN_PROFILE: '{{ .Values.cis_benchmark_scan_profile }}'\n\n"
+ " neuvector-init:\n"
+ " info:\n"
+ " description: sets up Neuvector prerequisites\n"
+ " details: |\n"
+ " it generates certificate, admin password, policy exception for using latest tag images (required for the pod managing the database of vulnerabilities since this DB is updated often)\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"neuvector\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " vault: true\n"
+ " vault-config-operator: true\n"
+ " kyverno: true\n"
+ " keycloak-add-client-scope: true\n"
+ " keycloak-oidc-external-secrets: true\n"
+ " kyverno-policies-ready: true\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " KEY: '{{ .Values.external_certificates.neuvector.key | default \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/neuvector-init\n"
+ " healthChecks:\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: neuvector-init\n"
+ " namespace: neuvector\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: neuvector-oidc-init\n"
+ " namespace: neuvector\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " NEUVECTOR_DNS: '{{ .Values.external_hostnames.neuvector }}'\n"
+ " KEYCLOAK_DNS: '{{ .Values.external_hostnames.keycloak }}'\n"
+ " SERVICE: neuvector\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.neuvector }}'\n"
+ " CERTIFICATE_NAMESPACE: neuvector\n"
+ " CERT: '{{ .Values.external_certificates.neuvector.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " K8S_AUTH_PATH: '{{ .Values.security.vault.paths.k8s }}'\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " VAULT_API: '{{ .Values.security.vault.external_vault_url | include \"set-if-defined\" }}'\n"
+ " _components:\n"
+ " - '{{ ternary \"../tls-components/tls-secret\" \"../tls-components/tls-certificate\" (hasKey .Values.external_certificates.neuvector \"cert\") }}'\n"
+ " - \"../tls-components/sylva-ca\"\n\n"
+ " neuvector:\n"
+ " info:\n"
+ " description: installs Neuvector\n"
+ " maturity: beta\n"
+ " enabled: no\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " neuvector-init: '{{ tuple . \"neuvector-init\" | include \"unit-enabled\" }}'\n"
+ " ingress-nginx: '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpk\" }}'\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"Neuvector UI can be reached at https://{{ .Values.external_hostnames.neuvector }} ({{ .Values._internal.display_external_ip_msg }})\"\n"
+ " helm_repo_url: https://neuvector.github.io/neuvector-helm\n"
+ " helm_chart_artifact_name: neuvector-core\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: core\n"
+ " version: 2.8.3\n"
+ " targetNamespace: neuvector\n"
+ " values:\n"
+ " leastPrivilege: true\n"
+ " internal:\n"
+ " certmanager:\n"
+ " enabled: true\n"
+ " secretname: neuvector-internal\n"
+ " autoGenerateCert: false\n"
+ " controller:\n"
+ " replicas: 1 # PVC only works for 1 replica https://github.com/neuvector/neuvector-helm/issues/110#issuecomment-1251921734\n"
+ " strategy:\n"
+ " type: RollingUpdate\n"
+ " rollingUpdate:\n"
+ " maxSurge: 1\n"
+ " maxUnavailable: 1\n"
+ " image:\n"
+ " repository: neuvector/controller\n"
+ " internal:\n"
+ " certificate:\n"
+ " secret: neuvector-internal\n"
+ " pvc:\n"
+ " enabled: true # setting PVC to true imposes 1 replica https://github.com/neuvector/neuvector-helm/issues/110#issuecomment-1251921734\n"
+ " accessModes:\n"
+ " - ReadWriteOnce\n"
+ " enforcer:\n"
+ " image:\n"
+ " repository: neuvector/enforcer\n"
+ " internal:\n"
+ " certificate:\n"
+ " secret: neuvector-internal\n"
+ " manager:\n"
+ " image:\n"
+ " repository: neuvector/manager\n"
+ " runAsUser: \"10000\"\n"
+ " ingress:\n"
+ " enabled: true\n"
+ " host: '{{ .Values.external_hostnames.neuvector }}'\n"
+ " ingressClassName: nginx\n"
+ " path: /\n"
+ " annotations:\n"
+ " nginx.ingress.kubernetes.io/backend-protocol: https\n"
+ " tls: true\n"
+ " secretName: neuvector-tls\n"
+ " cve:\n"
+ " updater:\n"
+ " podLabels:\n"
+ " tag-validating-policy.sylva.io: excluded\n"
+ " podAnnotations:\n"
+ " kube-score/ignore: container-image-tag\n"
+ " enabled: '{{ .Values.security.neuvector_scanning_enabled | include \"preserve-type\" }}'\n"
+ " image:\n"
+ " repository: neuvector/updater\n"
+ " scanner:\n"
+ " podLabels:\n"
+ " tag-validating-policy.sylva.io: excluded\n"
+ " podAnnotations:\n"
+ " kube-score/ignore: container-image-tag\n"
+ " enabled: '{{ .Values.security.neuvector_scanning_enabled | include \"preserve-type\" }}'\n"
+ " image:\n"
+ " repository: neuvector/scanner\n"
+ " env:\n"
+ " - name: https_proxy\n"
+ " value: '{{ .Values.proxies.https_proxy }}'\n"
+ " - name: no_proxy\n"
+ " value: '{{ include \"sylva-units.no_proxy\" (tuple .) }}'\n"
+ " internal:\n"
+ " certificate:\n"
+ " secret: neuvector-internal\n"
+ " containerd:\n"
+ " enabled: '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpk\" | include \"as-bool\" }}'\n"
+ " path: /var/run/containerd/containerd.sock\n"
+ " k3s:\n"
+ " enabled: '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" | include \"as-bool\" }}'\n"
+ " runtimePath: /run/k3s/containerd/containerd.sock\n"
+ " resources:\n"
+ " limits:\n"
+ " cpu: 400m\n"
+ " memory: 2792Mi\n"
+ " requests:\n"
+ " cpu: 100m\n"
+ " memory: 2280Mi\n"
+ " _postRenderers: |\n"
+ " {{- $patch := `\n"
+ " kustomize:\n"
+ " patches:\n"
+ " - target:\n"
+ " kind: CronJob\n"
+ " name: neuvector-updater-pod\n"
+ " patch: |-\n"
+ " - op: replace\n"
+ " path: /spec/startingDeadlineSeconds\n"
+ " value: 21600\n"
+ " - op: add\n"
+ " path: /metadata/labels/tag-validating-policy.sylva.io\n"
+ " value: excluded\n"
+ " - op: add\n"
+ " path: /spec/jobTemplate/spec/template/spec/containers/0/securityContext\n"
+ " value:\n"
+ " runAsNonRoot: true\n"
+ " runAsGroup: 10000\n"
+ " runAsUser: 10000\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " - target:\n"
+ " kind: Deployment\n"
+ " name: neuvector-scanner-pod\n"
+ " patch: |-\n"
+ " - op: add\n"
+ " path: /metadata/labels/tag-validating-policy.sylva.io\n"
+ " value: excluded\n"
+ " ` -}}\n"
+ " {{- if .Values.security.neuvector_scanning_enabled -}}\n"
+ " {{- list ($patch | fromYaml) | include \"preserve-type\" -}}\n"
+ " {{- else -}}\n"
+ " {{- list | include \"preserve-type\" -}}\n"
+ " {{- end -}}\n"
+ " kustomization_spec:\n"
+ " healthChecks:\n"
+ " - apiVersion: apps/v1\n"
+ " kind: Deployment\n"
+ " name: neuvector-manager-pod\n"
+ " namespace: neuvector\n"
+ " - apiVersion: apps/v1\n"
+ " kind: Deployment\n"
+ " name: neuvector-controller-pod\n"
+ " namespace: neuvector\n\n"
+ " crossplane-init:\n"
+ " info:\n"
+ " description: sets up Crossplane prerequisites\n"
+ " details: |\n"
+ " it generates CA certificate secret to be used by 'crossplane' unit\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"crossplane\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " keycloak: '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/crossplane-init\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " CERTIFICATE_NAMESPACE: crossplane-system\n"
+ " _components:\n"
+ " - \"../tls-components/sylva-ca\"\n"
+ " - '{{ tuple \"./components/providers/keycloak\" (tuple . \"keycloak\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/crossplane-monitoring\" (tuple . \"monitoring\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " healthChecks:\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: sylva-ca.crt\n"
+ " namespace: crossplane-system\n"
+ " - '{{ tuple (dict \"apiVersion\" \"v1\" \"kind\" \"Secret\" \"namespace\" \"crossplane-system\" \"name\" \"keycloak-provider-secret\") (tuple . \"keycloak\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple (dict \"apiVersion\" \"v1\" \"kind\" \"Secret\" \"namespace\" \"crossplane-system\" \"name\" \"keycloak-internal-tls\") (tuple . \"keycloak\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple (dict \"apiVersion\" \"v1\" \"kind\" \"Secret\" \"namespace\" \"sylva-system\" \"name\" \"keycloak-bootstrap-admin\") (tuple . \"keycloak\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n\n"
+ " crossplane-provider-keycloak:\n"
+ " info:\n"
+ " description: Deploys Crossplane Keycloak Provider\n"
+ " details: |\n"
+ " Deploys keycloak-provider and installs CRD's\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"crossplane\" | include \"unit-enabled\" }}'\n"
+ " - '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " crossplane: true\n"
+ " keycloak: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/crossplane-provider-keycloak\n"
+ " healthChecks:\n"
+ " - apiVersion: pkg.crossplane.io/v1\n"
+ " kind: Provider\n"
+ " name: crossplane-contrib-provider-keycloak\n\n"
+ " crossplane:\n"
+ " info:\n"
+ " description: Installs Crossplane with RBAC Manager\n"
+ " maturity: stable\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " crossplane-init: true\n"
+ " monitoring-crd: '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " helm_repo_url: https://charts.crossplane.io/stable\n"
+ " helmrelease_spec:\n"
+ " releaseName: crossplane\n"
+ " targetNamespace: crossplane-system\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: crossplane\n"
+ " version: 2.0.2\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " values:\n"
+ " provider:\n"
+ " packages:\n"
+ " - xpkg.upbound.io/crossplane-contrib/provider-keycloak:v2.1.0\n"
+ " extraVolumesCrossplane:\n"
+ " - name: sylva-ca-crt\n"
+ " secret:\n"
+ " secretName: sylva-ca.crt\n"
+ " extraVolumeMountsCrossplane:\n"
+ " - name: sylva-ca-crt\n"
+ " mountPath: /etc/ssl/certs/ca.crt\n"
+ " subPath: ca.crt\n"
+ " extraEnvVarsCrossplane:\n"
+ " HTTP_PROXY: '{{ .Values.proxies.http_proxy }}'\n"
+ " HTTPS_PROXY: '{{ .Values.proxies.https_proxy }}'\n"
+ " NO_PROXY: '{{ include \"sylva-units.no_proxy\" (tuple .) }}'\n"
+ " rbacManager:\n"
+ " deploy: true\n"
+ " skipAggregatedClusterRoles: false\n"
+ " replicas: 1\n"
+ " leaderElection: true\n"
+ " securityContextCrossplane:\n"
+ " runAsUser: 65532\n"
+ " runAsNonRoot: true\n"
+ " runAsGroup: 65532\n"
+ " allowPrivilegeEscalation: false\n"
+ " readOnlyRootFilesystem: true\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " securityContextRBACManager:\n"
+ " runAsNonRoot: true\n"
+ " runAsUser: 65532\n"
+ " runAsGroup: 65532\n"
+ " allowPrivilegeEscalation: false\n"
+ " readOnlyRootFilesystem: true\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " metrics:\n"
+ " enabled: '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n\n"
+ " keycloak-init:\n"
+ " info:\n"
+ " description: creates keycloak namespace\n"
+ " details: this unit creates the requirements to deploy keycloak\n"
+ " internal: true\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/namespace-defs/standard-namespace\n"
+ " targetNamespace: keycloak\n"
+ " wait: true\n\n"
+ " keycloak:\n"
+ " info:\n"
+ " description: initializes and configures Keycloak\n"
+ " maturity: stable\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " keycloak-init: true\n"
+ " sylva-ca: true\n"
+ " ingress-nginx: '{{ tuple . \"ingress-nginx\" | include \"unit-enabled\" }}'\n"
+ " keycloak-add-truststore: '{{ tuple . \"keycloak-add-truststore\" | include \"unit-enabled\" }}'\n"
+ " keycloak-postgres: '{{ tuple . \"keycloak-postgres\" | include \"unit-enabled\" }}'\n"
+ " keycloak-postgresql: '{{ tuple . \"keycloak-postgresql\" | include \"unit-enabled\" }}'\n"
+ " synchronize-secrets: true # make sure that the secret keycloak-bootstrap-admin is ready to be consummed\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"Keycloak admin console can be reached at https://{{ .Values.external_hostnames.keycloak }}/admin/master/console, user 'admin', password in Vault at secret/keycloak ({{ .Values._internal.display_external_ip_msg }})\"\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " KEY: '{{ .Values.external_certificates.keycloak.key | default \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/keycloak\n"
+ " targetNamespace: keycloak\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " KEYCLOAK_REPLICAS: '{{ .Values._internal.ha_cluster.is_ha | ternary 3 1 }}'\n"
+ " AFFINITY: '{{ .Values._internal.ha_cluster.is_ha | ternary (.Values._internal.keycloak_affinity | toJson) \"{}\" }}'\n"
+ " KEYCLOAK_DNS: '{{ .Values.external_hostnames.keycloak }}'\n"
+ " SERVICE: keycloak\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.keycloak }}'\n"
+ " CERT: '{{ .Values.external_certificates.keycloak.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " substituteFrom:\n"
+ " - kind: ConfigMap\n"
+ " name: proxy-env-vars\n"
+ " healthChecks: # using only \"wait: true\" would not give us visibility on the status of the StatefulSet\n"
+ " # the Keycloak StatefulSet set is produced, by the combination of Keycloak operator\n"
+ " # and a Keycloak custom resource, it relies on the postgres DB also deployed by this unit\n"
+ " # hence, checking for the health of this component can be done by checking this StatefulSet\n"
+ " - apiVersion: apps/v1\n"
+ " kind: StatefulSet\n"
+ " name: keycloak\n"
+ " namespace: keycloak\n"
+ " # the rest are objects created by the Kustomization\n"
+ " - apiVersion: k8s.keycloak.org/v2alpha1\n"
+ " kind: Keycloak\n"
+ " name: keycloak\n"
+ " namespace: keycloak\n"
+ " - apiVersion: cert-manager.io/v1\n"
+ " kind: Certificate\n"
+ " name: keycloak-internal-certificate\n"
+ " namespace: keycloak\n"
+ " - apiVersion: networking.k8s.io/v1\n"
+ " kind: Ingress\n"
+ " name: keycloak-ingress\n"
+ " namespace: keycloak\n"
+ " _components:\n"
+ " - '{{ tuple \"components/keycloak-operator-proxies\" (.Values.proxies.https_proxy) | include \"set-only-if\" }}'\n"
+ " - '{{ ternary \"../tls-components/tls-secret\" \"../tls-components/tls-certificate\" (hasKey .Values.external_certificates.keycloak \"cert\") }}'\n"
+ " - '{{ tuple \"components/keycloak-truststore\" (hasKey .Values.external_certificates \"cacert\") | include \"set-only-if\" }}'\n"
+ " _patches:\n"
+ " - patch: |\n"
+ " - op: replace\n"
+ " path: /spec/template/spec/containers/0/securityContext\n"
+ " value:\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " privileged: false\n"
+ " runAsNonRoot: true\n"
+ " runAsGroup: 1000\n"
+ " runAsUser: 1000\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " target:\n"
+ " kind: Deployment\n"
+ " name: keycloak-operator\n\n"
+ " keycloak-postgresql:\n"
+ " info:\n"
+ " description: Deploy Postgres cluster for Keycloak using Cloud Native PostgreSQL (CNPG)\n"
+ " maturity: stable\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " depends_on:\n"
+ " cnpg-operator: true\n"
+ " keycloak-init: true\n"
+ " keycloak-postgres: '{{ tuple . \"keycloak-postgres\" | include \"unit-enabled\" }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/keycloak-postgresql\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " replicas: '{{ .Values._internal.ha_cluster.is_ha | ternary 3 1 }}'\n"
+ " storageClass: '{{ .Values._internal.default_storage_class }}'\n"
+ " podAntiAffinityType: '{{ .Values._internal.ha_cluster.is_ha | ternary \"required\" \"preferred\" }}'\n"
+ " _components:\n"
+ " # Add component to import data from the db managed by the deprecated unit ...\n"
+ " - '{{ tuple \"./components/keycloak-import-db\" (tuple . \"keycloak-postgres\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " healthChecks:\n"
+ " - apiVersion: postgresql.cnpg.io/v1\n"
+ " kind: Cluster\n"
+ " name: keycloak-postgresql\n"
+ " namespace: keycloak\n"
+ " healthCheckExprs:\n"
+ " # CNPG does not expose a kstatus compatible status (missing observedGeneration), we use status.phase instead\n"
+ " # It seems to report an accurate view, see https://github.com/cloudnative-pg/cloudnative-pg/blob/v1.27.0/api/v1/cluster_types.go#L642\n"
+ " - apiVersion: postgresql.cnpg.io/v1\n"
+ " kind: Cluster\n"
+ " current: status.phase == \"Cluster in healthy state\"\n"
+ " failed: status.phase != \"Cluster in healthy state\"\n\n"
+ " keycloak-legacy-operator:\n"
+ " info:\n"
+ " description: installs Keycloak \"legacy\" operator\n"
+ " maturity: stable\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " vault-secrets: true # the credential-external-keycloak Secret use by the legacy operator is generated from ES/Vault secret/data/keycloak\n"
+ " keycloak: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/keycloak-legacy-operator\n"
+ " targetNamespace: keycloak\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " wait: true\n"
+ " _patches:\n"
+ " - patch: |\n"
+ " apiVersion: apps/v1\n"
+ " kind: Deployment\n"
+ " metadata:\n"
+ " name: keycloak-realm-operator\n"
+ " spec:\n"
+ " template:\n"
+ " spec:\n"
+ " containers:\n"
+ " - name: keycloak-realm-operator\n"
+ " securityContext:\n"
+ " runAsUser: 10000\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " runAsNonRoot: true\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " target:\n"
+ " kind: Deployment\n"
+ " name: keycloak-realm-operator\n\n"
+ " keycloak-resources:\n"
+ " info:\n"
+ " description: configures keycloak resources\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " keycloak: true\n"
+ " keycloak-legacy-operator: true\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " SSO_PASSWORD: '{{ .Values.admin_password }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/keycloak-resources\n"
+ " targetNamespace: keycloak\n"
+ " _components:\n"
+ " - '{{ tuple \"components/neuvector\" (tuple . \"neuvector\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/harbor\" (tuple . \"harbor\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/grafana\" (tuple . \"monitoring\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/kunai\" (tuple . \"kunai\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " GRAFANA_DNS: '{{ .Values.external_hostnames.grafana }}'\n"
+ " RANCHER_DNS: '{{ .Values.external_hostnames.rancher }}'\n"
+ " FLUX_WEBUI_DNS: '{{ .Values.external_hostnames.flux }}'\n"
+ " HARBOR_DNS: '{{ .Values.external_hostnames.harbor }}'\n"
+ " NEUVECTOR_DNS: '{{ tuple .Values.external_hostnames.neuvector (tuple . \"neuvector\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " VAULT_DNS: '{{ .Values.external_hostnames.vault }}'\n"
+ " EXPIRE_PASSWORD_DAYS: '{{ int .Values.keycloak.keycloak_expire_password_days }}'\n"
+ " KUNAI_DNS: '{{ tuple .Values.external_hostnames.kunai (tuple . \"kunai\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " healthChecks: # cannot use __wait: true__ here, see https://gitlab.com/sylva-projects/sylva-core/-/issues/144\n"
+ " - apiVersion: legacy.k8s.keycloak.org/v1alpha1\n"
+ " kind: KeycloakRealm\n"
+ " name: sylva\n"
+ " namespace: keycloak\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: keycloak-client-secret-rancher-client # this secret is a byproduct of the rancher-client KeycloakClient resource\n"
+ " namespace: keycloak\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: keycloak-client-secret-flux-webui-client # this secret is a byproduct of the flux-webui-client KeycloakClient resource\n"
+ " namespace: keycloak\n"
+ " - '{{ tuple (dict \"apiVersion\" \"v1\" \"kind\" \"Secret\" \"name\" \"keycloak-client-secret-neuvector-client\" \"namespace\" \"keycloak\") (tuple . \"neuvector\" | include \"unit-enabled\") | include \"set-only-if\" }}' # # this secret is a byproduct of the neuvector client KeycloakClient resource\n"
+ " - '{{ tuple (dict \"apiVersion\" \"v1\" \"kind\" \"Secret\" \"name\" \"keycloak-client-secret-harbor-client\" \"namespace\" \"keycloak\") (tuple . \"harbor\" | include \"unit-enabled\") | include \"set-only-if\" }}' # # this secret is a byproduct of the harbor client KeycloakClient resource\n"
+ " - '{{ tuple (dict \"apiVersion\" \"v1\" \"kind\" \"Secret\" \"name\" \"keycloak-client-secret-grafana-client\" \"namespace\" \"keycloak\") (tuple . \"monitoring\" | include \"unit-enabled\") | include \"set-only-if\" }}' # # this secret is a byproduct of the grafana client KeycloakClient resource\n"
+ " - '{{ tuple (dict \"apiVersion\" \"v1\" \"kind\" \"Secret\" \"name\" \"keycloak-client-secret-kunai-client\" \"namespace\" \"keycloak\") (tuple . \"kunai\" | include \"unit-enabled\") | include \"set-only-if\" }}' # # this secret is a byproduct of the grafana client KeycloakClient resource\n\n\n"
+ " keycloak-add-client-scope:\n"
+ " info:\n"
+ " description: configures Keycloak client-scope\n"
+ " details: >\n"
+ " a job to manually add a custom client-scope to sylva realm (on top of default ones)\n"
+ " while CRD option does not yet provide good results (overrides defaults)\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - kube-job\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " keycloak-resources: true\n"
+ " keycloak: true # defines the keycloak-bootstrap-admin Secret used by the script\n"
+ " kustomization_spec:\n"
+ " targetNamespace: keycloak\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " RUNASUSER: '10000'\n"
+ " RUNASGROUP: '10000'\n"
+ " _patches:\n"
+ " - '{{ include \"kube-job-replace-script-patch\" (.Files.Get \"scripts/keycloak-add-client-scope.sh\") }}'\n\n"
+ "# Workaround for realm role creation\n"
+ " keycloak-add-realm-role:\n"
+ " info:\n"
+ " description: Creates Keycloak realm role\n"
+ " details: >\n"
+ " a job to manually create a custom realm role to sylva realm (on top of default ones) and assigns it to sylva-admin\n"
+ " while CRD option does not allow updates.\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - kube-job\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " keycloak-resources: true\n"
+ " keycloak: true # defines the keycloak-bootstrap-admin Secret used by the script\n"
+ " kustomization_spec:\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " NAMESPACE: keycloak\n"
+ " RUNASUSER: '10000'\n"
+ " RUNASGROUP: '10000'\n"
+ " _patches:\n"
+ " - '{{ include \"kube-job-replace-script-patch\" (.Files.Get \"scripts/keycloak-add-realm-role.sh\") }}'\n\n"
+ " keycloak-add-truststore:\n"
+ " info:\n"
+ " description: configures Keycloak truststore\n"
+ " details: >\n"
+ " a job to manually add a truststore to Keycloak instance, e.h. to enable LDAPS protocol when using user federation)\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - kube-job\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " - '{{ hasKey .Values.external_certificates \"cacert\" }}'\n"
+ " depends_on:\n"
+ " vault: true\n"
+ " keycloak-init: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " kustomization_spec:\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " NAMESPACE: keycloak\n"
+ " CERTIFICATE_NAMESPACE: keycloak\n"
+ " _components:\n"
+ " - \"../tls-components/sylva-ca\"\n"
+ " _patches:\n"
+ " - '{{ include \"kube-job-replace-image-patch\" .Values._internal.keytool_image }}'\n"
+ " - '{{ include \"kube-job-replace-script-patch\" (.Files.Get \"scripts/keycloak-add-truststore.sh\") }}'\n\n"
+ " keycloak-oidc-external-secrets:\n"
+ " info:\n"
+ " description: configures OIDC secrets for Keycloak\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " keycloak: true\n"
+ " keycloak-legacy-operator: true\n"
+ " keycloak-resources: true\n"
+ " eso-secret-stores: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/keycloak-oidc-external-secrets\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " KEYCLOAK_DNS: '{{ .Values.external_hostnames.keycloak }}'\n"
+ " FLUX_WEBUI_DNS: '{{ .Values.external_hostnames.flux }}'\n"
+ " wait: false\n"
+ " healthChecks:\n"
+ " - >-\n"
+ " {{ tuple (dict\n"
+ " \"apiVersion\" \"v1\"\n"
+ " \"kind\" \"Secret\"\n"
+ " \"name\" \"grafana-oidc\"\n"
+ " \"namespace\" \"cattle-monitoring-system\"\n"
+ " )\n"
+ " (tuple . \"monitoring\" | include \"unit-enabled\")\n"
+ " | include \"set-only-if\"\n"
+ " }}\n"
+ " # this secret is a byproduct of the eso-grafana-oidc ExternalSecret resource\n"
+ " - >-\n"
+ " {{ tuple (dict\n"
+ " \"apiVersion\" \"v1\"\n"
+ " \"kind\" \"Secret\"\n"
+ " \"name\" \"oidc-auth\"\n"
+ " \"namespace\" \"flux-system\"\n"
+ " )\n"
+ " (tuple . \"flux-webui\" | include \"unit-enabled\")\n"
+ " | include \"set-only-if\"\n"
+ " }}\n"
+ " # this secret is a byproduct of the eso-flux-webui-oidc ExternalSecret resource\n"
+ " - >-\n"
+ " {{ tuple (dict\n"
+ " \"apiVersion\" \"v1\"\n"
+ " \"kind\" \"Secret\"\n"
+ " \"name\" \"keycloakoidcconfig-clientsecret\"\n"
+ " \"namespace\" \"cattle-global-data\"\n"
+ " )\n"
+ " (tuple . \"rancher\" | include \"unit-enabled\")\n"
+ " | include \"set-only-if\" }}\n"
+ " # this secret is a byproduct of the eso-rancher-oidc ExternalSecret resource\n"
+ " _components:\n"
+ " - '{{ tuple \"components/grafana-oidc\" (tuple . \"monitoring\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/flux-webui-oidc\" (tuple . \"flux-webui\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/rancher-oidc\" (tuple . \"rancher\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n\n"
+ " sbom-operator:\n"
+ " info:\n"
+ " description: installs SBOM operator\n"
+ " maturity: beta\n"
+ " enabled: no\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " helm_repo_url: https://ckotzbauer.github.io/helm-charts\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: sbom-operator\n"
+ " version: 0.36.0\n"
+ " targetNamespace: sbom-operator\n"
+ " values:\n"
+ " args:\n"
+ " targets: '{{ .Values.security.sbom_operator.targets | join \",\" }}'\n"
+ " dtrack-base-url: '{{ tuple .Values.security.sbom_operator.dtrack_base_url (hasKey .Values.security.sbom_operator \"dtrack_base_url\") | include \"set-only-if\" }}'\n"
+ " oci-registry: '{{ tuple .Values.security.sbom_operator.oci_registry (hasKey .Values.security.sbom_operator \"dtrack_base_url\") | include \"set-only-if\" }}'\n"
+ " format: cyclonedx\n"
+ " cron: \"*/30 * * * *\" # the cron value allows the operator to periodically checks for changed images in the cluster\n"
+ " envVars:\n"
+ " - name: https_proxy\n"
+ " value: '{{ .Values.proxies.https_proxy }}'\n"
+ " - name: http_proxy\n"
+ " value: '{{ .Values.proxies.https_proxy }}'\n"
+ " - name: no_proxy\n"
+ " value: '{{ include \"sylva-units.no_proxy\" (tuple .) }}'\n"
+ " helm_secret_values:\n"
+ " args:\n"
+ " dtrack-api-key: '{{ tuple .Values.security.sbom_operator.dtrack_api_key (hasKey .Values.security.sbom_operator \"dtrack-api-key\") | include \"set-only-if\" }}'\n"
+ " oci-user: '{{ tuple .Values.security.sbom_operator.oci_user (hasKey .Values.security.sbom_operator \"oci-user\") | include \"set-only-if\" }}'\n"
+ " oci-token: '{{ tuple .Values.security.sbom_operator.oci_token (hasKey .Values.security.sbom_operator \"oci-token\") | include \"set-only-if\" }}'\n\n"
+ " kyverno:\n"
+ " info:\n"
+ " description: installs Kyverno\n"
+ " maturity: core-component\n"
+ " unit_templates: [] # we intendedly don't inherit from base-deps, because kyverno is itself part of base dependencies\n"
+ " depends_on:\n"
+ " monitoring-crd: '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " helm_repo_url: https://kyverno.github.io/kyverno\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: kyverno\n"
+ " version: 3.4.4\n"
+ " targetNamespace: kyverno\n"
+ " timeout: 10m\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " values:\n"
+ " config:\n"
+ " resourceFiltersExclude:\n"
+ " - \"[Node,*,*]\"\n"
+ " - \"[Node/*,*,*]\"\n"
+ " # ##### how we exclude kube-system from Kyverno webhooks (but not completely) ###########################\n"
+ " # #\n"
+ " # It is a generalized safeguard to not let Kyverno define webhooks on kube-system namespace. #\n"
+ " # This is the default Kyverno chart configuration (namespaceSelector under config.webhook). #\n"
+ " # #\n"
+ " # In Sylva we have policy for which we *need* some validation webhooks in kube-system: #\n"
+ " # * rke2-helmchart-prevent-uninstall #\n"
+ " # * block-rke2-uninstall-jobs policies #\n"
+ " # .\n"
+ " # This comment groups the different things done to allow excluding the kube-system namespace\n"
+ " # except for Jobs.\n"
+ " #\n"
+ " # Instead of using a 'namespaceSelector' we use global 'matchConditions'.\n"
+ " # .\n"
+ " # /!\\ There is however a side effect: policies that use spec.webhookConfiguration.matchConditions\n"
+ " # will **not** exclude kube-system (this field overrides the global 'matchConditions').\n"
+ " # So any policy using spec.webhookConfiguration.matchConditions needs to include a CEL expression\n"
+ " # to exclude the kube-system namespace.\n"
+ " #\n"
+ " - \"[*/*,kube-system,*]\" # cancels this entry which is present in chart default resourceFilters\n"
+ " #\n"
+ " webhooks:\n"
+ " # cancels the namespaceSelector present in default values that excludes the kube-system namespace.\n"
+ " # This has the side effect of ignoring excludeKyvernoNamespace so we have to explicitly set it here.\n"
+ " namespaceSelector:\n"
+ " matchExpressions:\n"
+ " - key: kubernetes.io/metadata.name\n"
+ " operator: NotIn\n"
+ " values:\n"
+ " - kyverno\n"
+ " matchConditions: # ... and readds it via matchConditions to exclude kube-system :\n"
+ " # # but, as said above, this applies only to policies that do not set webhookConfiguration #\n"
+ " # # (CEL expression below tests ns exclusion except for non-namespaced resources) #\n"
+ " - expression: 'has(request.namespace) ? (request.namespace != \"kube-system\") : true' #\n"
+ " name: ns-not-kube-system-global #\n"
+ " # #########################################################################################################\n"
+ " webhooksCleanup:\n"
+ " enabled: false\n"
+ " image:\n"
+ " repository: alpine/kubectl\n"
+ " tag: 1.34.1\n"
+ " features:\n"
+ " policyExceptions:\n"
+ " enabled: true\n"
+ " namespace: \"*\"\n"
+ " admissionController:\n"
+ " crdWatcher: true\n"
+ " replicas: '{{ .Values._internal.default_replicas | include \"preserve-type\" }}'\n"
+ " rbac:\n"
+ " clusterRole:\n"
+ " extraResources:\n"
+ " - apiGroups: [\"metal3.io\"]\n"
+ " resources: [\"baremetalhosts\"]\n"
+ " verbs: [\"get\", \"list\", \"watch\"]\n"
+ " - apiGroups: [\"minio.min.io\"]\n"
+ " resources: [\"tenants\"]\n"
+ " verbs: [\"get\", \"list\", \"watch\"]\n"
+ " - apiGroups: [\"*\"]\n"
+ " resources: [ \"secrets\" ]\n"
+ " verbs: [\"get\", \"list\", \"watch\"]\n"
+ " - apiGroups: [\"crd.projectcalico.org\"]\n"
+ " resources:\n"
+ " - globalnetworksets\n"
+ " verbs: [\"create\", \"update\", \"patch\", \"delete\", \"get\", \"list\", \"watch\"]\n"
+ " - apiGroups: [\"ipam.metal3.io\"]\n"
+ " resources:\n"
+ " - ippools\n"
+ " verbs: [\"get\", \"list\", \"watch\"]\n"
+ " - apiGroups: [\"infrastructure.cluster.x-k8s.io\"]\n"
+ " resources:\n"
+ " - openstackmachines\n"
+ " - metal3machines\n"
+ " verbs: [\"get\", \"list\", \"watch\"]\n"
+ " - apiGroups: [\"helm.toolkit.fluxcd.io\"]\n"
+ " resources:\n"
+ " - helmreleases\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\"]\n"
+ " - apiGroups: [\"\"]\n"
+ " resources:\n"
+ " - configmaps\n"
+ " verbs: [\"get\", \"list\", \"patch\", \"update\", \"watch\", \"create\", \"delete\"]\n"
+ " - apiGroups: [\"apps\"]\n"
+ " resources:\n"
+ " - daemonsets\n"
+ " verbs: [\"get\", \"list\", \"patch\", \"update\", \"watch\", \"create\"]\n"
+ " - apiGroups: [\"apiextensions.k8s.io\"]\n"
+ " resources: [\"customresourcedefinitions\"]\n"
+ " verbs: [\"get\", \"list\", \"watch\"]\n"
+ " container:\n"
+ " resources:\n"
+ " limits:\n"
+ " memory: 1024Mi\n"
+ " serviceMonitor:\n"
+ " enabled: '{{ .Values._internal.monitoring.enabled | include \"preserve-type\" }}'\n"
+ " cleanupController:\n"
+ " replicas: '{{ .Values._internal.default_replicas | include \"preserve-type\" }}'\n"
+ " rbac:\n"
+ " clusterRole:\n"
+ " extraResources:\n"
+ " - apiGroups: [\"\"]\n"
+ " resources:\n"
+ " - nodes\n"
+ " - secrets\n"
+ " - services\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\", \"delete\"]\n"
+ " - apiGroups: [\"longhorn.io\"]\n"
+ " resources:\n"
+ " - engineimages\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\", \"delete\"]\n"
+ " serviceMonitor:\n"
+ " enabled: '{{ .Values._internal.monitoring.enabled | include \"preserve-type\" }}'\n"
+ " reportsController:\n"
+ " replicas: '{{ .Values._internal.default_replicas | include \"preserve-type\" }}'\n"
+ " serviceMonitor:\n"
+ " enabled: '{{ .Values._internal.monitoring.enabled | include \"preserve-type\" }}'\n"
+ " resources:\n"
+ " limits:\n"
+ " memory: 256Mi\n"
+ " requests:\n"
+ " memory: 128Mi\n"
+ " rbac:\n"
+ " clusterRole:\n"
+ " extraResources:\n"
+ " - apiGroups: [\"*\"]\n"
+ " resources: [\"*\"]\n"
+ " verbs: [\"get\", \"list\", \"watch\"]\n"
+ " backgroundController:\n"
+ " replicas: '{{ .Values._internal.default_replicas | include \"preserve-type\" }}'\n"
+ " resources:\n"
+ " limits:\n"
+ " memory: 1024Mi\n"
+ " requests:\n"
+ " memory: 256Mi\n"
+ " resyncPeriod: 5m\n"
+ " rbac:\n"
+ " clusterRole:\n"
+ " extraResources:\n"
+ " - apiGroups: [\"\"]\n"
+ " resources:\n"
+ " - \"secrets\"\n"
+ " verbs: [\"get\", \"list\", \"patch\", \"update\", \"watch\", \"create\", \"delete\"]\n"
+ " - apiGroups: [\"apps\"]\n"
+ " resources:\n"
+ " - daemonsets\n"
+ " verbs: [\"get\", \"list\", \"patch\", \"update\", \"watch\", \"create\"]\n"
+ " - apiGroups: [\"management.cattle.io\"] # for set-monitoring-chart-cluster-id ClusterPolicy\n"
+ " resources:\n"
+ " - \"clusters\"\n"
+ " verbs: [\"get\", \"list\", \"watch\"]\n"
+ " - apiGroups: [\"provisioning.cattle.io\"] # for set-monitoring-chart-cluster-id ClusterPolicy\n"
+ " resources:\n"
+ " - \"clusters\"\n"
+ " verbs: [\"get\", \"list\", \"watch\"]\n"
+ " - apiGroups: [\"batch\"]\n"
+ " resources:\n"
+ " - \"cronjobs\"\n"
+ " - \"jobs\"\n"
+ " verbs: [\"get\", \"list\", \"patch\", \"update\", \"watch\"]\n"
+ " - apiGroups: [\"minio.min.io\"]\n"
+ " resources: [\"tenants\"]\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\"]\n"
+ " - apiGroups: [\"apiextensions.k8s.io\"]\n"
+ " resources: [\"customresourcedefinitions\"]\n"
+ " verbs: [\"get\", \"list\", \"watch\"]\n"
+ " - apiGroups: [\"vault.banzaicloud.com\"]\n"
+ " resources:\n"
+ " - \"vaults\"\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\"]\n"
+ " - apiGroups: [\"apps\"]\n"
+ " resources:\n"
+ " - \"statefulsets\"\n"
+ " - \"deployments\"\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\"]\n"
+ " - apiGroups: [\"helm.toolkit.fluxcd.io\"]\n"
+ " resources:\n"
+ " - \"helmreleases\"\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\"]\n"
+ " - apiGroups: [\"external-secrets.io\"]\n"
+ " resources:\n"
+ " - \"externalsecrets\"\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\"]\n"
+ " - apiGroups: [\"unitsoperator.sylva\"]\n"
+ " resources:\n"
+ " - \"sylvaunitsreleasetemplates\"\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\", \"create\", \"delete\"]\n"
+ " - apiGroups: [\"cluster.x-k8s.io\"]\n"
+ " resources:\n"
+ " - machines\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\"]\n"
+ " - apiGroups: [\"\"]\n"
+ " resources:\n"
+ " - nodes\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\"]\n"
+ " - apiGroups: [\"\"]\n"
+ " resources:\n"
+ " - serviceaccounts\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\"]\n"
+ " - apiGroups: [\"metallb.io\"]\n"
+ " resources:\n"
+ " - ipaddresspools\n"
+ " - l2advertisements\n"
+ " - bgppeers\n"
+ " - bgpadvertisements\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\"]\n"
+ " - apiGroups: [\"crd.projectcalico.org\"]\n"
+ " resources:\n"
+ " - globalnetworksets\n"
+ " verbs: [\"create\", \"update\", \"patch\", \"delete\", \"get\", \"list\", \"watch\"]\n"
+ " - apiGroups: [\"infrastructure.cluster.x-k8s.io\"]\n"
+ " resources:\n"
+ " - openstackmachines\n"
+ " - metal3machines\n"
+ " verbs: [\"get\", \"list\", \"watch\"]\n"
+ " - apiGroups: [\"ipam.metal3.io\"]\n"
+ " resources:\n"
+ " - ippools\n"
+ " verbs: [\"get\", \"list\", \"watch\"]\n"
+ " - apiGroups: [\"crd.projectcalico.org\"]\n"
+ " resources:\n"
+ " - ippools\n"
+ " verbs: [\"get\", \"list\", \"watch\"]\n"
+ " - apiGroups: [\"metal3.io\"]\n"
+ " resources:\n"
+ " - baremetalhosts\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\"]\n"
+ " - apiGroups: [\"longhorn.io\"]\n"
+ " resources:\n"
+ " - \"volumes\"\n"
+ " verbs: [\"get\", \"list\", \"watch\", \"patch\", \"update\"]\n"
+ " # for rke2-helmchart-prevent-uninstall policy:\n"
+ " - >-\n"
+ " {{\n"
+ " tuple (dict \"apiGroups\" (list \"helm.cattle.io\")\n"
+ " \"resources\" (list \"helmcharts\")\n"
+ " \"verbs\" (list \"get\" \"list\" \"watch\" \"patch\" \"update\")\n"
+ " )\n"
+ " (eq .Values.cluster.capi_providers.bootstrap_provider \"cabpr\")\n"
+ " | include \"set-only-if\"\n"
+ " }}\n"
+ " serviceMonitor:\n"
+ " enabled: '{{ .Values._internal.monitoring.enabled | include \"preserve-type\" }}'\n"
+ " policyReportsCleanup:\n"
+ " image:\n"
+ " repository: alpine/kubectl\n"
+ " tag: 1.34.1\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - '{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include \"preserve-type\" }}'\n\n"
+ " kyverno-policies:\n"
+ " info:\n"
+ " description: configures Kyverno policies\n"
+ " internal: true\n"
+ " unit_templates: [] # we intendedly don't inherit from base-deps, because kyverno-policies is itself part of base dependencies\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"kyverno\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " kyverno: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kyverno-policies/generic\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " CLUSTER_NAME: '{{ .Values.cluster.name }}'\n"
+ " SIGN_MGMT: '{{ .Values.security.upstream_images_signature.policy_action }}'\n"
+ " SIG_REPO: '{{ .Values.security.upstream_images_signature.repository }}'\n"
+ " DIGEST: '{{ .Values.security.upstream_images_signature.policy_action | eq \"Enforce\" }}'\n"
+ " COSIGN_PUBLIC_KEY: '{{ .Values.security.upstream_images_signature.cosign_public_key | quote }}'\n"
+ " IMAGE_LIST: '{{ .Values.security.upstream_images_signature.images_list | toJson }}'\n"
+ " COREDNS_DEPLOYMENT_NAME: '{{ tuple (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpk\" | ternary \"coredns\" \"rke2-coredns-rke2-coredns\") (tuple . \"cabpoa\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " COREDNS_CONFIGMAP_NAME: '{{ tuple (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpk\" | ternary \"coredns\" \"rke2-coredns-rke2-coredns\") (tuple . \"cabpoa\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " _components:\n"
+ " - '{{ tuple \"components/bootstrap-cluster-only\" .Values._internal.bstrp_cluster | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/management-cluster-only\" .Values._internal.mgmt_cluster | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/descheduler\" (tuple . \"descheduler\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/loki-aggregated-secrets\" (tuple . \"loki\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/loki-aggregated-secrets-cleanup\" (and (tuple . \"loki\" | include \"unit-enabled\") (not .Values._internal.state.is_upgrade)) | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/thanos\" (tuple . \"thanos\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/check-images\" .Values.security.upstream_images_signature.verify | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/mutate-job-security-context\" (tuple . \"rancher\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/neuvector-policies\" (tuple . \"neuvector\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - ../exclude-kube-system # needs to be last (needed for components above that use sylva.org/kyverno-exclude-kube-system annotation)\n"
+ " - '{{ tuple \"components/coredns-custom-hosts-import\" (tuple . \"cabpoa\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/grafana-oidc\" (tuple . \"monitoring\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n\n"
+ " kyverno-policies-ready:\n"
+ " info:\n"
+ " description: additional delay to ensure that kyverno webhooks are properly installed in api-server\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - kube-job\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"kyverno-policies\" | include \"unit-enabled\" }}'\n"
+ " kustomization_spec:\n"
+ " _patches:\n"
+ " - '{{ include \"kube-job-replace-script-patch-inline\" \"sleep 60\" }}'\n\n"
+ " kyverno-vault-restart-policy:\n"
+ " info:\n"
+ " description: restart vault after certs renewal\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"kyverno\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " kyverno: true\n"
+ " vault: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kyverno-policies/kyverno-vault-restart-policy\n"
+ " wait: true\n\n"
+ " kyverno-metal3-policies:\n"
+ " info:\n"
+ " description: kyverno policies specific to capm3-system\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"kyverno\" | include \"unit-enabled\" }}'\n"
+ " - '{{ tuple . \"metal3\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " kyverno: true\n"
+ " metal3: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kyverno-policies/metal3-policies\n"
+ " wait: true\n\n"
+ " kyverno-update-namespace-and-psa:\n"
+ " info:\n"
+ " description: grants to Kyverno the permission to update namespaces using the \"updatepsa\" verb (Rancher-specific)\n"
+ " internal: true\n"
+ " details: >\n"
+ " This unit allows Kyverno to define namespaces with specific PodSecurityAdmission levels.\n"
+ " It is useful for situations where namespaces need to be mutated (with PSA labels)\n"
+ " in order to accomodate privileged pods (for which PSA level restricted at cluster level is not enough),\n"
+ " when namespace creation is not controlled\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" }}'\n"
+ " - '{{ tuple . \"kyverno\" | include \"unit-enabled\" }}'\n"
+ " - '{{ tuple . \"rancher\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " kyverno: true\n"
+ " rancher: true # updatepsa verb for projects resources in management.cattle.io apiGroup permission is Rancher specific\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kyverno-psa\n"
+ " wait: true\n\n"
+ " rancher-monitoring-clusterid-inject:\n"
+ " info:\n"
+ " description: injects Rancher cluster ID in Helm values of Rancher monitoring chart\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"kyverno\" | include \"unit-enabled\" }}'\n"
+ " - '{{ tuple . \"rancher\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " kyverno: '{{ tuple . \"kyverno\" | include \"unit-enabled\" }}'\n"
+ " rancher: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/rancher-monitoring-clusterid-inject\n"
+ " wait: true\n\n"
+ " mgmt-cluster-state-values:\n"
+ " info:\n"
+ " description: manages workload cluster parameters which reflect management cluster state\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " kyverno: true\n"
+ " sylva-units-operator: '{{ tuple . \"sylva-units-operator\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/mgmt-cluster-state-values\n"
+ " wait: true\n"
+ " force: true\n"
+ " targetNamespace: sylva-system\n"
+ " _patches:\n"
+ " - target:\n"
+ " kind: ConfigMap\n"
+ " patch: |\n"
+ " - op: replace\n"
+ " path: /data/values\n"
+ " value: |\n"
+ " {{- tuple . \"mgmt_cluster_state_values\" | include \"interpret\" }}\n"
+ " {{ .Values.mgmt_cluster_state_values | toYaml | indent 4 }}\n"
+ " _components:\n"
+ " - '{{ tuple \"components/capm3\" (tuple . \"capm3\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n\n"
+ " capi:\n"
+ " info:\n"
+ " description: installs Cluster API core operator\n"
+ " maturity: core-component\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/capi\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " force_var_substitution_enabled: \"true\" # dummy value to ensure substitution of defaults\n"
+ " wait: true\n"
+ " _components:\n"
+ " - '{{ tuple \"components/ha\" (.Values._internal.ha_cluster.is_ha) | include \"set-only-if\" }}'\n\n"
+ " capd:\n"
+ " info:\n"
+ " description: installs Docker CAPI infra provider\n"
+ " maturity: core-component\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/capd\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " CAPD_DOCKER_HOST: '{{ .Values.capd_docker_host }}'\n"
+ " substituteFrom:\n"
+ " - kind: ConfigMap\n"
+ " name: proxy-env-vars\n"
+ " optional: true\n"
+ " wait: true\n"
+ " _components:\n"
+ " - '{{ tuple \"components/ha\" (.Values._internal.ha_cluster.is_ha) | include \"set-only-if\" }}'\n\n"
+ " capo-orc:\n"
+ " info:\n"
+ " description: installs OpenStack Resource Controller (orc)\n"
+ " details: |\n"
+ " The [OpenStack Resource Controller](https://k-orc.cloud/) (a.k.a. ORC) is a component used by CAPO controller\n"
+ " maturity: core-component\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"capo\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/capo-orc\n"
+ " wait: true\n\n"
+ " capo:\n"
+ " info:\n"
+ " description: installs OpenStack CAPI infra provider\n"
+ " maturity: core-component\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " capo-orc: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/capo\n"
+ " wait: true\n"
+ " _components:\n"
+ " - '{{ tuple \"components/ha\" (.Values._internal.ha_cluster.is_ha) | include \"set-only-if\" }}'\n"
+ " _patches:\n"
+ " - target:\n"
+ " group: apps\n"
+ " version: v1\n"
+ " kind: Deployment\n"
+ " name: capo-controller-manager\n"
+ " namespace: capo-system\n"
+ " patch: |-\n"
+ " - op: add\n"
+ " path: /spec/template/spec/containers/0/securityContext\n"
+ " value:\n"
+ " runAsNonRoot: true\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n\n"
+ " metal3-ipam:\n"
+ " info:\n"
+ " description: installs IP Address Manager for Metal3 CAPI provider\n"
+ " maturity: core-component\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " capi: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/metal3-ipam\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " force_var_substitution_enabled: \"true\" # dummy value to ensure substitution of defaults\n"
+ " wait: true\n"
+ " _components:\n"
+ " - '{{ tuple \"components/ha\" (.Values._internal.ha_cluster.is_ha) | include \"set-only-if\" }}'\n\n"
+ " capm3:\n"
+ " info:\n"
+ " description: installs Metal3 CAPI infra provider, for baremetal\n"
+ " maturity: core-component\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " metal3-ipam: true\n"
+ " metal3: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/capm3\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " force_var_substitution_enabled: \"true\" # dummy value to ensure substitution of defaults\n"
+ " ENABLE_BMH_NAME_BASED_PREALLOCATION: \"true\"\n"
+ " wait: true\n"
+ " _components:\n"
+ " - '{{ tuple \"components/ha\" (.Values._internal.ha_cluster.is_ha) | include \"set-only-if\" }}'\n\n"
+ " capv:\n"
+ " info:\n"
+ " description: installs vSphere CAPI infra provider\n"
+ " maturity: core-component\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/capv\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " force_var_substitution_enabled: \"true\" # dummy value to ensure substitution of defaults\n"
+ " wait: true\n"
+ " _components:\n"
+ " - '{{ tuple \"components/ha\" (.Values._internal.ha_cluster.is_ha) | include \"set-only-if\" }}'\n\n"
+ " capone:\n"
+ " info:\n"
+ " description: installs OpenNebula CAPONE infra provider\n"
+ " maturity: core-component\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/capone\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " force_var_substitution_enabled: \"true\" # dummy value to ensure substitution of defaults\n"
+ " wait: true\n"
+ " _components:\n"
+ " - '{{ tuple \"components/ha\" (.Values._internal.ha_cluster.is_ha) | include \"set-only-if\" }}'\n\n"
+ " cabpk:\n"
+ " info:\n"
+ " description: installs Kubeadm CAPI bootstrap provider\n"
+ " maturity: core-component\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/cabpk\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " force_var_substitution_enabled: \"true\" # dummy value to ensure substitution of defaults\n"
+ " wait: true\n"
+ " _components:\n"
+ " - '{{ tuple \"components/ha\" (.Values._internal.ha_cluster.is_ha) | include \"set-only-if\" }}'\n\n"
+ " cabpck:\n"
+ " info:\n"
+ " description: installs Canonical CAPI bootstrap provider\n"
+ " maturity: core-component\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/cabpck\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " force_var_substitution_enabled: \"true\" # dummy value to ensure substitution of defaults\n"
+ " wait: true\n\n"
+ " cabpr:\n"
+ " info:\n"
+ " description: installs RKE2 CAPI bootstrap provider\n"
+ " maturity: core-component\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/cabpr\n"
+ " wait: true\n"
+ " _components:\n"
+ " - '{{ tuple \"components/ha\" (.Values._internal.ha_cluster.is_ha) | include \"set-only-if\" }}'\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " var_substitution_enabled: \"true\" # To force substitution of default controller diagnostics flags\n"
+ " _patches:\n"
+ " - target:\n"
+ " group: apps\n"
+ " version: v1\n"
+ " kind: Deployment\n"
+ " name: rke2-bootstrap-controller-manager\n"
+ " namespace: rke2-bootstrap-system\n"
+ " patch: |-\n"
+ " - op: add\n"
+ " path: /spec/template/spec/containers/0/securityContext\n"
+ " value:\n"
+ " runAsNonRoot: true\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " - target:\n"
+ " group: apps\n"
+ " version: v1\n"
+ " kind: Deployment\n"
+ " name: rke2-control-plane-controller-manager\n"
+ " namespace: rke2-control-plane-system\n"
+ " patch: |-\n"
+ " - op: add\n"
+ " path: /spec/template/spec/containers/0/securityContext\n"
+ " value:\n"
+ " runAsNonRoot: true\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n\n"
+ " cabpoa:\n"
+ " info:\n"
+ " description: installs OKD/OpenShift CAPI bootstrap/controlplane provider\n"
+ " maturity: experimental\n"
+ " enabled: false\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " capm3: true\n"
+ " openshift-assisted-installer: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/cabpoa\n"
+ " wait: true\n\n"
+ " openshift-assisted-installer:\n"
+ " info:\n"
+ " description: installs assisted installer operator for OKD\n"
+ " maturity: experimental\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " metal3: true\n"
+ " kyverno: true # test workaround for https://gitlab.com/sylva-projects/sylva-core/-/merge_requests/2654#note_2114765991\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"cabpoa\" | include \"unit-enabled\" }}'\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"{{ .Values.openshift.assisted.serviceHostname | default .Values.external_hostnames.openshift_assisted_service }} and {{ .Values.openshift.assisted.imageHostname | default .Values.external_hostnames.openshift_assisted_images }} must resolve to {{ .Values.display_external_ip }}\"\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/openshift-assisted-installer\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " ASSISTED_SERVICE_HOSTNAME: '{{ .Values.openshift.assisted.serviceHostname | default .Values.external_hostnames.openshift_assisted_service }}'\n"
+ " ASSISTED_IMAGE_HOSTNAME: '{{ .Values.openshift.assisted.imageHostname | default .Values.external_hostnames.openshift_assisted_images }}'\n"
+ " ASSISTED_DB_SIZE: '{{ .Values.openshift.assisted.dbSize }}'\n"
+ " ASSISTED_FS_SIZE: '{{ .Values.openshift.assisted.fsSize }}'\n"
+ " ASSISTED_IMAGESTORE_SIZE: '{{ .Values.openshift.assisted.imagestoreSize }}'\n"
+ " ASSISTED_OS_IMAGES: '{{ .Values.openshift.assisted.osImages | toJson }}'\n"
+ " HTTP_PROXY: '{{ .Values.proxies.http_proxy }}'\n"
+ " HTTPS_PROXY: '{{ .Values.proxies.https_proxy }}'\n"
+ " NO_PROXY: '{{ include \"sylva-units.no_proxy\" (tuple .) }}'\n"
+ " healthChecks:\n"
+ " - apiVersion: apps/v1\n"
+ " kind: StatefulSet\n"
+ " name: assisted-image-service\n"
+ " namespace: assisted-installer\n"
+ " - apiVersion: apps/v1\n"
+ " kind: Deployment\n"
+ " name: assisted-service\n"
+ " namespace: assisted-installer\n\n"
+ " openshift-security-context-constraints:\n"
+ " info:\n"
+ " description: sets up openshift security context constraints for operators not installed via RedHat operator lifecycle manager(OLM)\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled: '{{ eq .Values.cluster.capi_providers.bootstrap_provider \"cabpoa\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/openshift-security-context-constraints\n"
+ " wait: true\n"
+ " _components:\n"
+ " - '{{ tuple \"components/ceph-csi-cephfs\" (tuple . \"ceph-csi-cephfs\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/goldpinger\" (tuple . \"goldpinger\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n\n"
+ " metal3-sylva-ca-init:\n"
+ " info:\n"
+ " description: injects sylva-ca certificate in metal3\n"
+ " details: this certificate is needed to download baremetal os images via https\n"
+ " internal: true\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"metal3\" | include \"unit-enabled\" }}'\n"
+ " - '{{ tuple . \"os-image-server\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " ingress-nginx-init: true\n"
+ " external-secrets-operator: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/sylva-ca-external-secret\n"
+ " wait: true\n"
+ " targetNamespace: metal3-system\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " CERTIFICATE_NAMESPACE: metal3-system\n"
+ " CACERT_SECRET_NAME: tls-ca-additional # this Secret is used by metal3 chart when additionalTrustedCAs is true\n"
+ " CERT_FILE_NAME: sylva-ca.crt\n\n"
+ " metal3:\n"
+ " info:\n"
+ " description: installs SUSE-maintained Metal3 operator\n"
+ " maturity: stable\n"
+ " enabled: false\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " annotations:\n"
+ " sylvactl/unitTimeout: '{{ printf \"%dm\" ( include \"cluster-unit-timeout\" . | int) }}'\n"
+ " depends_on:\n"
+ " metal3-sylva-ca-init: '{{ tuple . \"metal3-sylva-ca-init\" | include \"unit-enabled\" }}'\n"
+ " namespace-defs: true\n"
+ " cert-manager: true\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': '{{ not .Values._internal.state.is_upgrade }}'\n"
+ " metal3-pdb: '{{ tuple . \"metal3-pdb\" | include \"unit-enabled\" }}'\n"
+ " metallb-resources: '{{ tuple . \"metallb\" | include \"unit-enabled\" }}'\n"
+ " helm_repo_url: https://suse-edge.github.io/charts\n"
+ " helm_chart_artifact_name: metal3\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: metal3\n"
+ " version: 0.12.7\n"
+ " timeout: 30m\n"
+ " targetNamespace: metal3-system\n"
+ " values:\n"
+ " global:\n"
+ " additionalTrustedCAs: true # results in having the chart use the metal3-system/tls-ca-additional Secret which is produced by metal3-sylva-ca-init unit\n"
+ " ironicIP: '{{ .Values.display_external_ip }}'\n"
+ " provisioningInterface: eth0\n"
+ " metal3-ironic:\n"
+ " images:\n"
+ " ironicIPADownloader:\n"
+ " repository: registry.opensuse.org/isv/suse/edge/containers/images/ironic-ipa-downloader-x86_64\n"
+ " fullnameOverride: metal3-ironic\n"
+ " service:\n"
+ " type: LoadBalancer\n"
+ " annotations: '{{ .Values._internal.lb_service_annotations | default dict | include \"preserve-type\" }}'\n"
+ " baremetaloperator:\n"
+ " ironichostNetwork: false\n"
+ " persistence:\n"
+ " ironic:\n"
+ " size: 10Gi\n"
+ " storageClass: '{{ .Values._internal.default_storage_class }}'\n"
+ " accessMode: ReadWriteOnce\n"
+ " metal3-baremetal-operator:\n"
+ " replicaCount: '{{ .Values._internal.ha_cluster.is_ha | ternary 2 1 | include \"preserve-type\" }}'\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - patch: |\n"
+ " kind: Service\n"
+ " apiVersion: v1\n"
+ " metadata:\n"
+ " name: metal3-ironic\n"
+ " spec:\n"
+ " type: LoadBalancer\n"
+ " loadBalancerClass: '{{ .Values._internal.loadBalancerClass }}'\n\n"
+ " metal3-pdb:\n"
+ " info:\n"
+ " description: add pdb to baremetal-operator pods\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"metal3\" | include \"unit-enabled\" }}'\n"
+ " - '{{ .Values._internal.ha_cluster.is_ha }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " kyverno: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/metal3-pdb\n"
+ " wait: true\n\n"
+ " local-path-provisioner:\n"
+ " info:\n"
+ " description: installs local-path CSI\n"
+ " maturity: stable\n"
+ " repo: local-path-provisioner\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: deploy/chart/local-path-provisioner\n"
+ " targetNamespace: kube-system\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " values:\n"
+ " storageClass:\n"
+ " defaultClass: '{{ .Values._internal.default_storage_class | eq \"local-path\" | include \"as-bool\" }}'\n"
+ " nodePathMap:\n"
+ " - node: DEFAULT_PATH_FOR_NON_LISTED_NODES\n"
+ " paths:\n"
+ " - /var/lib/kubelet/local-path-provisioner\n"
+ " helperImage:\n"
+ " repository: docker.io/library/busybox\n"
+ " tag: 1.37.0\n\n"
+ " cluster-bmh:\n"
+ " info:\n"
+ " description: definitions for Cluster API BareMetalHosts resources (capm3)\n"
+ " maturity: core-component\n"
+ " labels:\n"
+ " sylva-units/protected: \"\"\n"
+ " repo: sylva-capi-cluster\n"
+ " helm_chart_artifact_name: sylva-capi-cluster\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.cluster.capi_providers.infra_provider | eq \"capm3\" }}'\n"
+ " #depends_on: the dependency on metal3 is handled in management.values.yaml\n"
+ " # and workload-cluster.values.yaml\n"
+ " kustomization_spec:\n"
+ " prune: '{{ not (eq .Release.Namespace \"sylva-system\") | include \"preserve-type\" }}'\n"
+ " helmrelease_spec:\n"
+ " driftDetection:\n"
+ " # The 'online' parameter is managed by BareMetal and Metal3 operators,\n"
+ " # while 'automatedCleaningMode' is managed by a Kyverno policy (https://gitlab.com/sylva-projects/sylva-core/-/merge_requests/3073).\n"
+ " # Therefore, drift detection should ignore these fields.\n"
+ " ignore:\n"
+ " - target:\n"
+ " kind: BareMetalHost\n"
+ " paths:\n"
+ " - /spec/online\n"
+ " - /spec/automatedCleaningMode\n"
+ " - target:\n"
+ " kind: Secret\n"
+ " paths:\n"
+ " - /metadata/labels/environment.metal3.io\n"
+ " targetNamespace: '{{ .Release.Namespace }}'\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: charts/sylva-capi-cluster\n"
+ " values:\n"
+ " resource_groups:\n"
+ " - baremetal-hosts\n"
+ " # we pass everything that is under `cluster` to this unit that uses sylva-capi-cluster chart\n"
+ " # (we do it via a secret because some of the values are credentials in many scenarios)\n"
+ " helm_secret_values: '{{ .Values.cluster | include \"preserve-type\" }}'\n\n"
+ " cluster-node-provider-id-blacklist:\n"
+ " info:\n"
+ " description: ValidatingAdmissionPolicy to prevent nodes from being recreated with a providerID that has already been used\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"misc-controllers-suite\" | include \"unit-enabled\" }}'\n"
+ " - '{{ .Values.cluster.capi_providers.infra_provider | eq \"capm3\" }}'\n"
+ " depends_on:\n"
+ " misc-controllers-suite: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kyverno-policies/cluster-node-provider-id-blacklist\n"
+ " wait: true\n\n"
+ " rke2-helmchart-prevent-uninstall:\n"
+ " info:\n"
+ " description: Kyverno policy to prevent key Helm charts from being uninstalled by RKE2 HelmChart controller\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - '{{ tuple \"base-deps\" (not .Values._internal.state.is_upgrade ) | include \"set-only-if\" }}'\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" }}'\n"
+ " depends_on:\n"
+ " kyverno: true\n"
+ " # on an upgrade we don't want this unit to be applied after 'cluster' unit reconciliation,\n"
+ " # because we want the policy to be active during node rolling update\n"
+ " # but on a fresh install, we setup this policy only after 'cluster' unit (the first\n"
+ " # RKE2 installation installs the RKE2 HelmChart CRD):\n"
+ " cluster: '{{ and (not .Values._internal.state.is_upgrade)\n"
+ " (tuple . \"cluster\" | include \"unit-enabled\") }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kyverno-policies/rke2-helmchart-prevent-uninstall\n"
+ " wait: true\n\n"
+ " cluster-maxunavailable-apply:\n"
+ " info:\n"
+ " description: ensure that cluster-maxunavailable effective before updating 'cluster' unit\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - kube-job\n"
+ " enabled_conditions: # we enable this unit only if we transition from \"cluster-maxunavailable not used\"\n"
+ " # to \"cluster-maxunavailable used\", because it's only in that case that we\n"
+ " # need to ensure that all Machines have the pre-drain hook before the cluster unit\n"
+ " # is updated\n"
+ " # cluster-maxunavailable functionality is wanted:\n"
+ " - '{{ hasKey (.Values.cluster | dig \"annotations\" \"cluster\" dict) \"cluster-maxunavailable.sylva.org/enabled\" }}'\n"
+ " # cluster-maxunavailable functionality is not yet setup:\n"
+ " - >-\n"
+ " {{- if .Values._internal.state.is_upgrade -}}\n"
+ " {{- lookup \"cluster.x-k8s.io/v1beta1\" \"Cluster\" .Release.Namespace .Values.cluster.name\n"
+ " | dig \"metadata\" \"annotations\" \"cluster-maxunavailable.sylva.org/enabled\" \"-unset-\"\n"
+ " | eq \"-unset-\" -}}\n"
+ " {{- end -}}\n"
+ " kustomization_spec:\n"
+ " _patches:\n"
+ " - '{{ include \"kube-job-add-env-var-patch\" (dict\n"
+ " \"CLUSTER_NAME\" .Values.cluster.name\n"
+ " )}}'\n"
+ " - '{{ include \"kube-job-replace-script-patch\" (.Files.Get \"scripts/cluster-apply-cluster-maxunavailable.sh\") }}'\n\n"
+ " cluster:\n"
+ " info:\n"
+ " description: holds the Cluster API definition for the cluster\n"
+ " maturity: core-component\n"
+ " labels:\n"
+ " sylva-units/protected: \"\"\n"
+ " repo: sylva-capi-cluster\n"
+ " unit_templates: []\n"
+ " helm_chart_artifact_name: sylva-capi-cluster\n"
+ " depends_on:\n"
+ " # see management.values.yaml and workload-cluster.values.yaml to see how\n"
+ " # we handle the fact that this unit depends on the CAPI-related components being ready\n"
+ " os-images-info: '{{ list \"capo\" \"capm3\" | has .Values.cluster.capi_providers.infra_provider }}'\n"
+ " capo-cluster-resources: '{{ .Values.cluster.capi_providers.infra_provider | eq \"capo\" }}'\n"
+ " capo-network-settings: '{{ .Values.cluster.capi_providers.infra_provider | eq \"capo\" }}'\n"
+ " sync-openstack-images: '{{ .Values.cluster.capi_providers.infra_provider | eq \"capo\" }}'\n"
+ " cluster-bmh: '{{ .Values.cluster.capi_providers.infra_provider | eq \"capm3\" }}'\n"
+ " cluster-node-provider-id-blacklist: '{{ tuple . \"cluster-node-provider-id-blacklist\" | include \"unit-enabled\" }}'\n"
+ " longhorn-instance-manager-cleanup: '{{ tuple . \"longhorn-instance-manager-cleanup\" | include \"unit-enabled\" }}'\n"
+ " rke2-helmchart-prevent-uninstall: '{{ and (tuple . \"rke2-helmchart-prevent-uninstall\" | include \"unit-enabled\") .Values._internal.state.is_upgrade }}'\n"
+ " metallb: '{{ and (tuple . \"metallb\" | include \"unit-enabled\") .Values._internal.state.is_upgrade }}'\n"
+ " calico: '{{ .Values._internal.state.is_upgrade }}'\n"
+ " calico-ready: '{{ .Values._internal.state.is_upgrade }}'\n"
+ " cluster-garbage-collector: '{{ and (tuple . \"cluster-garbage-collector\" | include \"unit-enabled\") .Values._internal.state.is_upgrade }}'\n"
+ " misc-controllers-suite: '{{ and (tuple . \"misc-controllers-suite\" | include \"unit-enabled\") .Values._internal.state.is_upgrade }}'\n"
+ " cluster-maxunavailable-apply: '{{ tuple . \"cluster-maxunavailable-apply\" | include \"unit-enabled\" }}'\n"
+ " longhorn-update-stale-replica-timeout: '{{ and (tuple . \"longhorn-update-stale-replica-timeout\" | include \"unit-enabled\") .Values._internal.state.is_upgrade }}'\n"
+ " annotations:\n"
+ " sylvactl/unitTimeout: '{{ printf \"%dm\" ( include \"cluster-unit-timeout\" . | int) }}'\n"
+ " kustomization_spec:\n"
+ " prune: '{{ not (eq .Release.Namespace \"sylva-system\") | include \"preserve-type\" }}'\n"
+ " # we wait on all important resources built by sylva-capi-cluster,\n"
+ " # *except* the MachineDeployments, since if we're using kubeadm as bootstrap\n"
+ " # we would have a deadlock: the default CNI unit would not deploy\n"
+ " # before the cluster unit is ready, and the cluster would not be ready until\n"
+ " # the CNI is deployed because the MachineDeployment nodes need the CNI to become\n"
+ " # ready (for the controlplane nodes, the kubeadm controlplane provider ignores that)\n"
+ " healthChecks: >-\n"
+ " {{\n"
+ " (include \"cluster-healthchecks\" (dict \"ns\" .Release.Namespace \"cluster\" .Values.cluster \"includeMDs\" false \"onlyCheckKubeConfig\" (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpck\")) | fromYaml).result\n"
+ " | include \"preserve-type\"\n"
+ " }}\n"
+ " helmrelease_spec:\n"
+ " uninstall:\n"
+ " timeout: '{{ mul .Values.cluster.timeouts.cluster_delete_hook_job_timeout 1.2 }}s'\n"
+ " targetNamespace: '{{ .Release.Namespace }}'\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: charts/sylva-capi-cluster\n"
+ " values:\n"
+ " resource_groups:\n"
+ " - cluster-resource\n"
+ " - base\n"
+ " unset_cluster_paused: '{{ .Release.IsUpgrade | include \"preserve-type\" }}'\n"
+ " freeze_first_node_files: '{{ .Values._internal.state.is_upgrade | include \"preserve-type\" }}'\n"
+ " valuesFrom:\n"
+ " - kind: ConfigMap\n"
+ " name: capo-cluster-resources\n"
+ " valuesKey: allocated_ip\n"
+ " targetPath: cluster_virtual_ip\n"
+ " optional: '{{ not (.Values.cluster.capi_providers.infra_provider | eq \"capo\") | include \"as-bool\" }}'\n"
+ " - kind: ConfigMap\n"
+ " name: capo-cluster-resources\n"
+ " valuesKey: allocated_fip\n"
+ " targetPath: cluster_public_ip\n"
+ " optional: '{{ not (.Values.cluster.capi_providers.infra_provider | eq \"capo\") | include \"as-bool\" }}'\n"
+ " - kind: ConfigMap\n"
+ " name: capo-cluster-resources\n"
+ " valuesKey: control_plane_servergroup_id\n"
+ " targetPath: control_plane.capo.server_group_id\n"
+ " optional: '{{ not (.Values.cluster.capi_providers.infra_provider | eq \"capo\") | include \"as-bool\" }}'\n"
+ " - kind: ConfigMap\n"
+ " name: capo-cluster-resources\n"
+ " valuesKey: worker_servergroup_id\n"
+ " targetPath: machine_deployment_default.capo.server_group_id\n"
+ " optional: '{{ not (.Values.cluster.capi_providers.infra_provider | eq \"capo\") | include \"as-bool\" }}'\n"
+ " - kind: ConfigMap\n"
+ " name: openstack-images-uuids ## this ConfigMap is a byproduct of the sync-openstack-images job\n"
+ " optional: '{{ not (.Values.cluster.capi_providers.infra_provider | eq \"capo\") | include \"as-bool\" }}'\n\n"
+ " # os-images-info ConfigMap\n"
+ " #\n"
+ " # this ConfigMap is a byproduct of the os-images-info unit\n"
+ " #\n"
+ " # it is used for capo and capm3\n"
+ " #\n"
+ " # for capm3 workload clusters it is important to have it as an override of the kyverno-cloned-os-images-info-capm3\n"
+ " # to ensure that whenever there would be an image with the same image key in workload and mgmt context but for a\n"
+ " # different content, the workload cluster sylva-capi-cluster chart receives the one coming from the os-images-info\n"
+ " # computed by the workload cluster sylva-units Helm release based on the value of the workload cluster\n"
+ " - |\n"
+ " {{-\n"
+ " tuple\n"
+ " (dict \"kind\" \"ConfigMap\"\n"
+ " \"name\" \"os-images-info\")\n"
+ " (or (.Values.cluster.capi_providers.infra_provider | eq \"capm3\")\n"
+ " (.Values.cluster.capi_providers.infra_provider | eq \"capo\")\n"
+ " )\n"
+ " | include \"set-only-if\"\n"
+ " -}}\n\n"
+ " # ConfigMap for capm3_os_image_server_images\n"
+ " # this ConfigMap is a byproduct of the os-image-server unit (from mgmt cluster)\n"
+ " #\n"
+ " # it is used for capm3 only\n"
+ " #\n"
+ " # for the mgmt cluster, the configmap is the capm3-os-image-server-os-images ConfigMap produced by os-image-server unit\n"
+ " # for for workload clusters, this ConfigMap is a Kyverno-cloned copy, in each cluster namespace, of the output ConfigMap of the os-image-server unit\n"
+ " # which reflects what the os-image-server unit in mgmt context is currently serving\n"
+ " - |\n"
+ " {{-\n"
+ " tuple\n"
+ " (dict \"kind\" \"ConfigMap\"\n"
+ " \"name\" .Values._internal.os_image_server_images_configmap)\n"
+ " (.Values.cluster.capi_providers.infra_provider | eq \"capm3\")\n"
+ " | include \"set-only-if\"\n"
+ " -}}\n\n"
+ " # ConfigMaps to inject allocated_ip and mtu in Calico configuration\n"
+ " #\n"
+ " # only used for capo\n"
+ " # byproduct of the capo-cluster-resources and capo-network-settings units\n"
+ " - |\n"
+ " {{-\n"
+ " tuple\n"
+ " (dict \"kind\" \"ConfigMap\"\n"
+ " \"name\" \"capo-cluster-resources\"\n"
+ " \"valuesKey\" \"allocated_ip\"\n"
+ " \"targetPath\" \"cni.calico.helm_values.installation.calicoNetwork.nodeAddressAutodetectionV4.canReach\"\n"
+ " )\n"
+ " .Values._internal.capo_calico_autodetection_method_use_canReach_vip\n"
+ " | include \"set-only-if\"\n"
+ " -}}\n"
+ " - |\n"
+ " {{-\n"
+ " tuple\n"
+ " (dict \"kind\" \"ConfigMap\"\n"
+ " \"name\" \"capo-network-mtu\"\n"
+ " \"valuesKey\" \"calico_mtu\"\n"
+ " \"targetPath\" \"cni.calico.helm_values.installation.calicoNetwork.mtu\"\n"
+ " )\n"
+ " (eq .Values.cluster.capi_providers.infra_provider \"capo\")\n"
+ " | include \"set-only-if\"\n"
+ " -}}\n"
+ " # we pass everything that is under `cluster` to this unit that uses sylva-capi-cluster chart\n"
+ " # (we do it via a secret because some of the values are credentials in many scenarios)\n"
+ " helm_secret_values: '{{ .Values.cluster | include \"preserve-type\" }}'\n\n"
+ " cluster-garbage-collector:\n"
+ " info:\n"
+ " description: installs cronjob responsible for unused CAPI resources cleaning\n"
+ " internal: true\n"
+ " enabled: false # until https://gitlab.com/sylva-projects/sylva-core/-/issues/2820 is fully fixed\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - kube-cronjob\n"
+ " depends_on:\n"
+ " cluster: false # we can't depend directly on 'cluster' unit, since it's being disabled in 'management-sylva-units' and re-enabled by 'pivot'\n"
+ " capi: true\n"
+ " capd: '{{ tuple . \"capd\" | include \"unit-enabled\" }}'\n"
+ " capv: '{{ tuple . \"capv\" | include \"unit-enabled\" }}'\n"
+ " capo: '{{ tuple . \"capo\" | include \"unit-enabled\" }}'\n"
+ " capm3: '{{ tuple . \"capm3\" | include \"unit-enabled\" }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/cluster-garbage-collector\n"
+ " _components:\n"
+ " - '{{ tuple \"components/dev-ci-cronjob-schedule\" (list \"dev\" \"ci\" | has .Values.env_type) | include \"set-only-if\" }}'\n\n"
+ " cluster-ready:\n"
+ " info:\n"
+ " description: unit to check readiness of cluster CAPI objects\n"
+ " details: the healthChecks on this unit complements the one done in the 'cluster' unit, which in some cases can't cover all CAPI resources\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - dummy\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"cluster\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " cluster: true\n"
+ " annotations:\n"
+ " sylvactl/unitTimeout: '{{ printf \"%dm\" ( include \"cluster-unit-timeout\" . | int) }}'\n"
+ " sylvactl/timeoutReference: \"Kustomization/{{ .Release.Namespace }}/cluster\"\n"
+ " kustomization_spec:\n"
+ " healthChecks: >-\n"
+ " {{\n"
+ " (include \"cluster-healthchecks\" (dict \"ns\" .Release.Namespace \"cluster\" .Values.cluster) | fromYaml).result\n"
+ " | include \"preserve-type\"\n"
+ " }}\n\n"
+ " cluster-reachable:\n"
+ " info:\n"
+ " internal: true\n"
+ " description: ensure that created clusters are reachable, and make failure a bit more explicit if it is not the case\n"
+ " details: >\n"
+ " This unit will be enabled in bootstrap cluster to check connectivity to management cluster\n"
+ " and in various workload-cluster namespaces in management cluster to check connectivity to workload clusters\n"
+ " unit_templates:\n"
+ " - dummy\n"
+ " enabled: false\n"
+ " depends_on:\n"
+ " # cluster-reachable depends on cluster only on first installation\n"
+ " cluster: '{{ not .Values._internal.state.is_upgrade }}'\n"
+ " annotations:\n"
+ " sylvactl/unitTimeout: 15m\n"
+ " kustomization_spec:\n"
+ " targetNamespace: default\n"
+ " kubeConfig:\n"
+ " secretRef:\n"
+ " name: '{{ .Values.cluster.name }}-kubeconfig'\n"
+ " _components:\n"
+ " - ../configmap-component\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " UNIT_NAME: cluster-reachable\n"
+ " wait: true\n\n"
+ " cluster-machines-ready:\n"
+ " info:\n"
+ " description: unit used to wait for all CAPI resources to be ready\n"
+ " details: |\n"
+ " This unit is here so that activity on all units is held off until all the CAPI resources are ready.\n"
+ " This is a distinct unit from 'cluster-ready' because the readiness criteria is different: here\n"
+ " we not only want the cluster to be ready to host some workload (which only requires some CAPI resources\n"
+ " to be ready) we want all CAPI resources to be ready.\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"cluster\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " # for kubeadm, calico is installed by Flux and Machines don't become ready until calico is installed\n"
+ " # so we need to have cluster-machines-ready depend on 'calico'\n"
+ " # (this is relevant only on installation, on upgrades calico is already installed)\n"
+ " '{{ .Values._internal.calico_readiness_unit }}': '{{ and (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpk\") (not .Values._internal.state.is_upgrade) }}'\n"
+ " cluster: true\n"
+ " cluster-ready: true # no need to start waiting for anything until\n"
+ " opennebula-cpi: '{{ .Values.cluster.capi_providers.infra_provider | eq \"capone\" }}'\n"
+ " vsphere-cpi: '{{ .Values.cluster.capi_providers.infra_provider | eq \"capv\" }}'\n"
+ " unit_templates:\n"
+ " - kube-job\n"
+ " annotations:\n"
+ " sylvactl/unitTimeout: '{{ printf \"%dm\" (add (include \"cluster-unit-timeout\" . | int) 10) }}'\n"
+ " sylvactl/timeoutReference: \"Kustomization/{{ .Release.Namespace }}/cluster\"\n"
+ " kustomization_spec:\n"
+ " _patches:\n"
+ " - '{{ include \"kube-job-add-env-var-patch\" (dict\n"
+ " \"WAIT_TIMEOUT\" .Values.cluster_machines_ready.wait_timeout\n"
+ " \"CLUSTER_NAME\" .Values.cluster.name\n"
+ " \"CONTROL_PLANE\" .Values._internal.controlplane_kind\n"
+ " \"BOOTSTRAP_PROVIDER\" .Values.cluster.capi_providers.bootstrap_provider\n"
+ " )}}'\n"
+ " - '{{ include \"kube-job-replace-script-patch\" (.Files.Get \"scripts/cluster-machines-ready.sh\") }}'\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " POD_ACTIVE_DEADLINE_SECONDS: \"1800\" # if a checks takes more than 30 minutes to finalize,\n"
+ " # then it's better to start a new one, it might be because it was stuck\n"
+ " # and it's harmless to do a wait check again\n\n"
+ " heat-operator:\n"
+ " info:\n"
+ " description: installs OpenStack Heat operator\n"
+ " maturity: core-component\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.cluster.capi_providers.infra_provider | eq \"capo\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/heat-operator\n"
+ " wait: true\n\n"
+ " sylva-units-operator:\n"
+ " info:\n"
+ " description: installs sylva-units operator\n"
+ " maturity: experimental\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " repo: sylva-core\n"
+ " depends_on:\n"
+ " flux-system: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/sylva-units-operator\n"
+ " wait: true\n\n"
+ " workload-cluster-operator:\n"
+ " info:\n"
+ " description: installs Sylva operator for managing workload clusters\n"
+ " maturity: experimental\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " repo: sylva-core\n"
+ " depends_on:\n"
+ " sylva-units-operator: '{{ tuple . \"sylva-units-operator\" | include \"unit-enabled\" }}'\n"
+ " external-secrets-operator: '{{ tuple . \"external-secrets-operator\" | include \"unit-enabled\" }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/workload-cluster-operator\n"
+ " wait: true\n\n"
+ " misc-controllers-suite:\n"
+ " info:\n"
+ " description: Sylva controllers from [misc-controllers-suite project](https://gitlab.com/sylva-projects/sylva-elements/misc-controllers-suite)\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " # ENABLE_CLUSTER_MAXUNAVAILABLE: \"true\" # only in management.values.yaml\n"
+ " ENABLE_PROVIDER_ID_BLACKLIST: \"true\"\n"
+ " path: ./kustomize-units/misc-controllers-suite\n"
+ " wait: true\n\n"
+ " capo-cloud-config:\n"
+ " info:\n"
+ " description: creates CAPO cloud-config used to produce Heat stack\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.cluster.capi_providers.infra_provider | eq \"capo\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/capo-cluster-resources/cloud-config\n"
+ " wait: true\n"
+ " targetNamespace: '{{ .Release.Namespace }}'\n"
+ " kustomization_substitute_secrets:\n"
+ " CAPO_CLOUD_YAML: '{{ .Values.cluster.capo.clouds_yaml | toYaml | b64enc }}'\n"
+ " CAPO_CACERT: '{{ (.Values.cluster.capo.cacert|default \"\") | b64enc }}'\n\n"
+ " capo-cluster-resources:\n"
+ " info:\n"
+ " description: installs OpenStack Heat stack for CAPO cluster prerequisites\n"
+ " internal: true\n"
+ " labels:\n"
+ " sylva-units/protected: \"\"\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.cluster.capi_providers.infra_provider | eq \"capo\" }}'\n"
+ " repo: sylva-core\n"
+ " depends_on:\n"
+ " capo-cloud-config: true\n"
+ " heat-operator: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/capo-cluster-resources/heat-stack\n"
+ " wait: true\n"
+ " targetNamespace: '{{ .Release.Namespace }}'\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " STACK_NAME_PREFIX: '{{ .Values.cluster.name }}-{{ tuple . .Values.cluster.capo.resources_tag | include \"interpret-as-string\" | replace \".\" \"-\" }}'\n"
+ " CAPO_TAG: '{{ .Values.cluster.capo.resources_tag }}'\n"
+ " CAPO_NETWORK_ID: '{{ .Values.cluster.capo.network_id }}'\n"
+ " CONTROL_PLANE_AFFINITY_POLICY: '{{ .Values.openstack.control_plane_affinity_policy }}'\n"
+ " WORKER_AFFINITY_POLICY: '{{ .Values.openstack.worker_affinity_policy }}'\n"
+ " CAPO_EXTERNAL_NETWORK_ID: '{{ tuple .Values.openstack.external_network_id .Values.openstack.external_network_id | include \"set-only-if\" }}'\n"
+ " CONTROL_PLANE_SECURITY_GROUP_NAME: 'capo-{{ .Values.cluster.name }}-security-group-ctrl-plane-{{ .Values.cluster.capo.resources_tag }}'\n"
+ " WORKER_SECURITY_GROUP_NAME: 'capo-{{ .Values.cluster.name }}-security-group-workers-{{ .Values.cluster.capo.resources_tag }}'\n"
+ " CAPO_CREATE_IRONIC_SECURITY_GROUP: '{{ tuple . (and (tuple . \"metal3\" | include \"unit-enabled\") (.Values.cluster.capi_providers.infra_provider | eq \"capo\")) \"true\" \"false\" | include \"interpret-ternary\" }}'\n"
+ " COMMON_SECURITY_GROUP_NAME: 'capo-{{ .Values.cluster.name }}-security-group-common-{{ .Values.cluster.capo.resources_tag }}'\n"
+ " DUMMY_UPDATE_TRIGGER: 'helm-release-revision/{{ .Release.Revision }},sylva-units-version/{{ .Chart.Version }}'\n"
+ " CLUSTER_VIRTUAL_IP_PORT_UUID: '{{ .Values.openstack.cluster_virtual_ip_port_uuid | default \"\" }}'\n"
+ " CLUSTER_FLOATING_IP_UUID: '{{ .Values.openstack.cluster_floating_ip_uuid | default \"\" }}'\n\n"
+ " capo-network-settings:\n"
+ " info:\n"
+ " description: ConfigMap that contains the network settings for CAPO\n"
+ " internal: true\n"
+ " labels:\n"
+ " sylva-units/protected: \"\"\n"
+ " unit_templates:\n"
+ " - kube-job\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.cluster.capi_providers.infra_provider | eq \"capo\" }}'\n"
+ " depends_on:\n"
+ " capo-cloud-config: true\n"
+ " kustomization_spec:\n"
+ " _patches:\n"
+ " - '{{ include \"kube-job-add-env-var-patch\" (dict\n"
+ " \"CALICO_ENCAPSULATION_OVERHEAD\" .Values._internal.calico_encapsulation_overhead\n"
+ " \"RELEASE_NAMESPACE\" .Release.Namespace\n"
+ " ) }}'\n"
+ " - '{{ include \"kube-job-replace-script-patch\" (.Files.Get \"scripts/capo-network-settings.sh\") }}'\n\n"
+ " calico-crd:\n"
+ " info:\n"
+ " description: installs Calico CRDs\n"
+ " maturity: stable\n"
+ " hidden: true\n"
+ " labels:\n"
+ " sylva-units/protected: \"\"\n"
+ " unit_templates:\n"
+ " - '{{ tuple \"base-deps\" (not .Values._internal.state.is_upgrade) | include \"set-only-if\" }}'\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"calico\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " helm_repo_url: https://rke2-charts.rancher.io\n"
+ " helm_chart_versions:\n"
+ " v3.30.100: >-\n"
+ " {{ include \"k8s-version-match\" (tuple \">=1.30.0,<1.31.0\" .Values._internal.k8s_version) }}\n"
+ " v3.30.300: >-\n"
+ " {{ include \"k8s-version-match\" (tuple \">=1.31.0\" .Values._internal.k8s_version) }}\n"
+ " helmrelease_spec:\n"
+ " releaseName: rke2-calico-crd\n"
+ " targetNamespace: kube-system\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: rke2-calico-crd\n"
+ " version: \"\" # will be defined by helm_chart_versions\n"
+ " kustomization_spec:\n"
+ " prune: false\n\n"
+ " calico:\n"
+ " info:\n"
+ " description: install Calico CNI\n"
+ " maturity: stable\n"
+ " labels:\n"
+ " sylva-units/protected: \"\"\n"
+ " unit_templates:\n"
+ " - '{{ tuple \"base-deps\" (not .Values._internal.state.is_upgrade) | include \"set-only-if\" }}'\n"
+ " depends_on:\n"
+ " calico-crd: true\n"
+ " capo-network-settings: '{{ tuple . \"capo-network-settings\" | include \"unit-enabled\" }}'\n"
+ " helm_repo_url: https://rke2-charts.rancher.io\n"
+ " helm_chart_versions:\n"
+ " v3.30.100: >-\n"
+ " {{ include \"k8s-version-match\" (tuple \">=1.30.0,<1.31.0\" .Values._internal.k8s_version) }}\n"
+ " v3.30.300: >-\n"
+ " {{ include \"k8s-version-match\" (tuple \">=1.31.0\" .Values._internal.k8s_version) }}\n"
+ " helmrelease_spec:\n"
+ " # Setting drift detection mode to warn for calico till https://gitlab.com/sylva-projects/sylva-core/-/issues/1814 is solved.\n"
+ " driftDetection:\n"
+ " mode: warn\n"
+ " releaseName: '{{ list \"cabpk\" \"cabpck\" | has .Values.cluster.capi_providers.bootstrap_provider | ternary \"calico\" \"rke2-calico\" }}'\n"
+ " targetNamespace: '{{ list \"cabpk\" \"cabpck\" | has .Values.cluster.capi_providers.bootstrap_provider | ternary \"sylva-system\" \"kube-system\" }}'\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: rke2-calico\n"
+ " version: \"\" # will be defined by helm_chart_versions\n"
+ " values: '{{ .Values.calico_helm_values | include \"preserve-type\" }}'\n"
+ " valuesFrom:\n"
+ " - |\n"
+ " {{- tuple\n"
+ " (dict \"kind\" \"ConfigMap\"\n"
+ " \"name\" \"capo-network-mtu\"\n"
+ " \"valuesKey\" \"calico_mtu\"\n"
+ " \"targetPath\" \"installation.calicoNetwork.mtu\"\n"
+ " )\n"
+ " (eq .Values.cluster.capi_providers.infra_provider \"capo\")\n"
+ " | include \"set-only-if\"\n"
+ " -}}\n"
+ " # For Calico nodeAddressAutodetection, for RKE2 on CAPO, we need to pass the cluster VIP in calico values\n"
+ " # as installation.calicoNetwork.nodeAddressAutodetectionV4.canReach\n"
+ " #\n"
+ " # this address is read from the ConfigMap produced by capo-cluster-resources unit\n"
+ " # which allocates the VIP\n"
+ " - |\n"
+ " {{- tuple\n"
+ " (dict \"kind\" \"ConfigMap\"\n"
+ " \"name\" \"capo-cluster-resources\"\n"
+ " \"valuesKey\" \"allocated_ip\"\n"
+ " \"targetPath\" \"installation.calicoNetwork.nodeAddressAutodetectionV4.canReach\"\n"
+ " )\n"
+ " .Values._internal.capo_calico_autodetection_method_use_canReach_vip\n"
+ " | include \"set-only-if\"\n"
+ " -}}\n\n"
+ " calico-ready:\n"
+ " info:\n"
+ " internal: true\n"
+ " description: ensure Calico resources created by the Tigera operator are ready before running further steps\n"
+ " details: >\n"
+ " This unit will be enabled in bootstrap cluster to confirm management cluster CNI readiness\n"
+ " and in various workload-cluster namespaces in management cluster to do the same for workload clusters\n"
+ " maturity: beta\n"
+ " enabled: false # disabled in management cluster only for initial installation, see management.values.yaml\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"calico\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - dummy\n"
+ " depends_on:\n"
+ " calico: true\n"
+ " annotations:\n"
+ " sylvactl/unitTimeout: '{{ printf \"%dm\" ( include \"cluster-unit-timeout\" . | int) }}'\n"
+ " kustomization_spec:\n"
+ " targetNamespace: default\n"
+ " kubeConfig:\n"
+ " secretRef:\n"
+ " name: '{{ .Values.cluster.name }}-kubeconfig'\n"
+ " healthChecks:\n"
+ " - apiVersion: operator.tigera.io/v1\n"
+ " kind: Installation\n"
+ " name: default\n\n"
+ " metallb:\n"
+ " info:\n"
+ " description: installs MetalLB operator\n"
+ " maturity: stable\n"
+ " labels:\n"
+ " sylva-units/protected: \"\"\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ or (.Values.cluster.capi_providers.infra_provider | eq \"capd\")\n"
+ " (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\") }}'\n"
+ " depends_on:\n"
+ " monitoring-crd: '{{ tuple . \"monitoring-crd\" | include \"unit-enabled\" }}'\n"
+ " '{{ .Values._internal.calico_readiness_unit }}': '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpk\" }}'\n"
+ " cluster-vip: '{{ and (tuple . \"cluster-vip\" | include \"unit-enabled\") .Values._internal.state.is_upgrade }}'\n"
+ " helm_repo_url: https://metallb.github.io/metallb\n"
+ " helmrelease_spec:\n"
+ " driftDetection:\n"
+ " ignore:\n"
+ " - target:\n"
+ " kind: CustomResourceDefinition\n"
+ " name: bgppeers.metallb.io\n"
+ " paths:\n"
+ " - /spec/conversion/webhook/clientConfig/caBundle\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: metallb\n"
+ " version: 0.15.2\n"
+ " targetNamespace: metallb-system\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " values: '{{ .Values.metallb_helm_values | include \"preserve-type\" }}'\n\n"
+ " metallb-resources:\n"
+ " info:\n"
+ " description: configures metallb resources\n"
+ " internal: true\n"
+ " labels:\n"
+ " sylva-units/protected: \"\"\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"metallb\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " annotations:\n"
+ " sylvactl/unitTimeout: '{{ printf \"%dm\" ( include \"cluster-unit-timeout\" . | int) }}'\n"
+ " depends_on:\n"
+ " metallb: '{{ not .Values._internal.state.is_upgrade }}'\n"
+ " repo: metallb-resources\n"
+ " helm_chart_artifact_name: metallb-resources\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: .\n"
+ " targetNamespace: metallb-system\n"
+ " valuesFrom:\n"
+ " # for capo the cluster VIP has to be taken from capo-cluster-resources ConfigMap (and isn't included in .Values._internal.metallb above)\n"
+ " - kind: ConfigMap\n"
+ " name: capo-cluster-resources\n"
+ " valuesKey: allocated_ip\n"
+ " targetPath: cluster_virtual_ip # will result in the creation of the 'lbpool' IPAddressPool\n"
+ " optional: '{{ not (.Values.cluster.capi_providers.infra_provider | eq \"capo\") | include \"as-bool\" }}'\n"
+ " helm_secret_values: |-\n"
+ " {{- $resources := dict }}\n"
+ " {{- tuple $resources .Values.metallb .Values._internal.metallb | include \"merge-append\" }}\n"
+ " {{ $resources | include \"preserve-type\" }}\n\n"
+ " cinder-csi:\n"
+ " info:\n"
+ " description: installs OpenStack Cinder CSI\n"
+ " maturity: stable\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.cluster.capi_providers.infra_provider | eq \"capo\" }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " helm_repo_url: https://kubernetes.github.io/cloud-provider-openstack\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: openstack-cinder-csi\n"
+ " version: 2.33.1\n"
+ " targetNamespace: cinder-csi\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " values:\n"
+ " clusterID: '{{ .Values.cluster.capo.resources_tag }}'\n"
+ " storageClass:\n"
+ " enabled: false\n"
+ " delete:\n"
+ " isDefault: false\n"
+ " allowVolumeExpansion: true\n"
+ " retain:\n"
+ " isDefault: false\n"
+ " allowVolumeExpansion: true\n"
+ " custom: |-\n"
+ " ---\n"
+ " apiVersion: storage.k8s.io/v1\n"
+ " kind: StorageClass\n"
+ " metadata:\n"
+ " name: \"{{ .Values.openstack.storageClass.name }}\"\n"
+ " annotations:\n"
+ " storageclass.kubernetes.io/is-default-class: \"{{ eq .Values._internal.default_storage_class .Values.openstack.storageClass.name }}\"\n"
+ " provisioner: cinder.csi.openstack.org\n"
+ " volumeBindingMode: Immediate\n"
+ " reclaimPolicy: Delete\n"
+ " allowVolumeExpansion: true\n"
+ " parameters:\n"
+ " type: \"{{ .Values.openstack.storageClass.type }}\"\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - target:\n"
+ " kind: Deployment\n"
+ " name: openstack-cinder-csi-controllerplugin\n"
+ " patch: |\n"
+ " kind: Deployment\n"
+ " metadata:\n"
+ " name: openstack-cinder-csi-controllerplugin\n"
+ " spec:\n"
+ " template:\n"
+ " metadata:\n"
+ " annotations:\n"
+ " clouds-yaml-hash: '{{ .Values._internal.clouds_yaml_hash }}'\n"
+ " helm_secret_values:\n"
+ " secret:\n"
+ " enabled: \"true\"\n"
+ " create: \"true\"\n"
+ " name: cinder-csi-cloud-config\n"
+ " data:\n"
+ " cloud.conf: |-\n"
+ " {{- if .Values.cluster.capi_providers.infra_provider | eq \"capo\" -}}\n"
+ " [Global]\n"
+ " auth-url = {{ .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.auth.auth_url | quote }}\n"
+ " tenant-name = {{ .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.auth.project_name | quote }}\n"
+ " domain-name = {{ .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.auth.user_domain_name | quote }}\n"
+ " username = {{ .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.auth.username | quote }}\n"
+ " password = {{ .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.auth.password | quote }}\n"
+ " region = {{ .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.region_name | quote }}\n"
+ " tls-insecure = {{ not .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.verify }}\n"
+ " [BlockStorage]\n"
+ " ignore-volume-az = true\n"
+ " {{- end -}}\n\n"
+ " synchronize-secrets:\n"
+ " info:\n"
+ " description: allows secrets from Vault to be consumed other units, relies on ExternalSecrets\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " eso-secret-stores: true\n"
+ " vault-secrets: true\n"
+ " keycloak-init: '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/synchronize-secrets\n"
+ " _components:\n"
+ " - '{{ tuple \"components/keycloak\" (tuple . \"keycloak\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/rancher\" (tuple . \"rancher\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " FLUX_ADMIN_USERNAME: '{{ .Values.flux_webui.admin_user }}'\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " healthChecks:\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: cluster-user-auth\n"
+ " namespace: flux-system\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: cluster-creator-secret\n"
+ " namespace: sylva-system\n"
+ " - >-\n"
+ " {{ tuple (dict\n"
+ " \"apiVersion\" \"v1\"\n"
+ " \"kind\" \"Secret\"\n"
+ " \"name\" \"rancher-bootstrap-secret\"\n"
+ " \"namespace\" \"cattle-system\"\n"
+ " )\n"
+ " (tuple . \"rancher\" | include \"unit-enabled\")\n"
+ " | include \"set-only-if\"\n"
+ " }}\n"
+ " - >-\n"
+ " {{ tuple (dict\n"
+ " \"apiVersion\" \"v1\"\n"
+ " \"kind\" \"Secret\"\n"
+ " \"name\" \"keycloak-bootstrap-admin\"\n"
+ " \"namespace\" \"keycloak\"\n"
+ " )\n"
+ " (tuple . \"keycloak\" | include \"unit-enabled\")\n"
+ " | include \"set-only-if\"\n"
+ " }}\n\n"
+ " rancher-init:\n"
+ " info:\n"
+ " description: initializes and configures Rancher\n"
+ " maturity: beta\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"rancher\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " ingress-nginx: '{{ tuple . \"ingress-nginx\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " KEY: '{{ .Values.external_certificates.rancher.key | default \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/rancher-init\n"
+ " targetNamespace: cattle-system\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " SERVICE: rancher\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.rancher }}'\n"
+ " CERT: '{{ .Values.external_certificates.rancher.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " _components:\n"
+ " - '{{ tuple \"components/webhook-ha\" (.Values._internal.ha_cluster.is_ha) | include \"set-only-if\" }}'\n"
+ " - '{{ ternary \"../tls-components/tls-secret\" \"../tls-components/tls-certificate\" (hasKey .Values.external_certificates.rancher \"cert\") }}'\n"
+ " - \"../tls-components/sylva-ca\"\n\n"
+ " rancher:\n"
+ " info:\n"
+ " description: installs Rancher\n"
+ " maturity: stable\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " cert-manager: true\n"
+ " k8s-gateway: true\n"
+ " rancher-init: true\n"
+ " ingress-nginx: '{{ tuple . \"ingress-nginx\" | include \"unit-enabled\" }}'\n"
+ " synchronize-secrets: true\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"Rancher UI can be reached at https://{{ .Values.external_hostnames.rancher }} ({{ .Values._internal.display_external_ip_msg }})\"\n"
+ " sylvactl/unitTimeout: 10m\n"
+ " helm_repo_url: https://releases.rancher.com/server-charts/stable\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: rancher\n"
+ " # When changing the major or minor version of rancher, make sure that renovate is\n"
+ " # also updated to accept the corresponding versions of the other rancher charts (monitory, sriov, longhorn, backup)\n"
+ " # See https://github.com/rancher/charts/blob/dev-v2.9/README.md\n"
+ " version: 2.11.3\n"
+ " targetNamespace: cattle-system\n"
+ " values:\n"
+ " additionalTrustedCAs: true\n"
+ " auditLog:\n"
+ " level: '{{ .Values.audit_log.level | include \"preserve-type\" }}'\n"
+ " privateCA: true\n"
+ " useBundledSystemChart: true\n"
+ " hostname: '{{ .Values.external_hostnames.rancher }}'\n"
+ " ingress:\n"
+ " enabled: true\n"
+ " ingressClassName: nginx\n"
+ " tls:\n"
+ " source: secret\n"
+ " secretName: rancher-tls\n"
+ " extraAnnotations:\n"
+ " nginx.ingress.kubernetes.io/proxy-body-size: 8m\n"
+ " # restrictedAdmin: true\n"
+ " # negative value will deploy 1 to abs(replicas) depending on available number of nodes\n"
+ " replicas: -3\n"
+ " features: embedded-cluster-api=false,provisioningv2=true,managed-system-upgrade-controller=false\n"
+ " proxy: '{{ get .Values.proxies \"https_proxy\" }}'\n"
+ " noProxy: '{{ include \"sylva-units.no_proxy\" (tuple .) }}'\n"
+ " postDelete:\n"
+ " namespaceList:\n"
+ " - cattle-fleet-system\n"
+ " - rancher-operator-system\n"
+ " extraEnv:\n"
+ " - name: CATTLE_BOOTSTRAP_PASSWORD\n"
+ " valueFrom:\n"
+ " secretKeyRef:\n"
+ " name: rancher-bootstrap-secret\n"
+ " key: bootstrapPassword\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - patch: |-\n"
+ " kind: Deployment\n"
+ " apiVersion: apps/v1\n"
+ " metadata:\n"
+ " name: rancher\n"
+ " spec:\n"
+ " template:\n"
+ " spec:\n"
+ " volumes:\n"
+ " - name: tls-ca-additional-volume\n"
+ " secret:\n"
+ " defaultMode: 256\n"
+ " items:\n"
+ " - key: ca.crt\n"
+ " path: ca-additional.pem\n"
+ " secretName: sylva-ca.crt\n"
+ " - name: tls-ca-volume\n"
+ " secret:\n"
+ " defaultMode: 256\n"
+ " secretName: rancher-tls\n"
+ " items:\n"
+ " - key: ca.crt\n"
+ " path: cacerts.pem\n"
+ " # this is to avoid that the too-short default liveness probe\n"
+ " # prevents the Rancher installation from finishing before the pod is killed:\n"
+ " containers:\n"
+ " - name: rancher\n"
+ " livenessProbe:\n"
+ " initialDelaySeconds: 120\n"
+ " periodSeconds: 30\n"
+ " failureThreshold: 20\n\n"
+ " kustomization_spec:\n"
+ " # these healthChecks are added so that does not become ready before\n"
+ " # a few things that Rancher sets up behind the scene are ready\n"
+ " healthChecks:\n"
+ " - apiVersion: apiextensions.k8s.io/v1\n"
+ " kind: CustomResourceDefinition\n"
+ " name: clusters.provisioning.cattle.io # this is because capi-rancher-import needs this\n"
+ " - apiVersion: apps/v1\n"
+ " kind: Deployment\n"
+ " name: rancher-webhook\n"
+ " namespace: cattle-system\n"
+ " - apiVersion: v1\n"
+ " kind: Service\n"
+ " name: rancher-webhook\n"
+ " namespace: cattle-system\n\n"
+ " rancher-custom-roles:\n"
+ " info:\n"
+ " description: configures custom roles for Rancher\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"rancher\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " depends_on:\n"
+ " rancher: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/rancher-custom-roles\n"
+ " wait: true\n\n"
+ " rancher-keycloak-oidc-provider:\n"
+ " info:\n"
+ " description: configures Rancher for Keycloak OIDC integration\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"rancher\" | include \"unit-enabled\" }}'\n"
+ " - '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " depends_on:\n"
+ " rancher: true\n"
+ " rancher-custom-roles: true\n"
+ " keycloak: true\n"
+ " keycloak-resources: true\n"
+ " keycloak-oidc-external-secrets: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/rancher-keycloak-oidc-provider\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " KEYCLOAK_EXTERNAL_URL: '{{ .Values.external_hostnames.keycloak }}'\n"
+ " RANCHER_EXTERNAL_URL: '{{ .Values.external_hostnames.rancher }}'\n"
+ " AUTH_USER_INFO_MAX_AGE_SECONDS: '{{ .Values.rancher.auth_user_info_max_age_seconds | quote }}'\n"
+ " AUTH_USER_INFO_RESYNC_CRON: '{{ .Values.rancher.auth_user_info_resync_cron | quote }}'\n"
+ " wait: true\n\n"
+ " k8s-gateway:\n"
+ " info:\n"
+ " description: installs k8s gateway (coredns + plugin to resolve external service names to ingress IPs)\n"
+ " details: >\n"
+ " is here only to allow for DNS resolution of Ingress hosts (FQDNs), used for importing workload clusters into Rancher and for flux-webui to use Keycloak SSO\n"
+ " maturity: stable\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " metallb-resources: '{{ tuple . \"metallb\" | include \"unit-enabled\" }}'\n"
+ " helm_repo_url: https://ori-edge.github.io/k8s_gateway/\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: k8s-gateway\n"
+ " version: 2.4.0\n"
+ " targetNamespace: k8s-gateway\n"
+ " driftDetection:\n"
+ " mode: enabled\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " upgrade:\n"
+ " force: true\n"
+ " values:\n"
+ " domain: '{{ .Values.cluster_domain }}'\n"
+ " replicaCount: 3\n"
+ " service:\n"
+ " annotations: '{{ .Values._internal.lb_service_annotations | default dict | include \"preserve-type\" }}'\n"
+ " # Following extraZonePlugins lines include all chart defaults plus the hosts plugin\n"
+ " extraZonePlugins:\n"
+ " - name: log\n"
+ " - name: errors\n"
+ " - name: health\n"
+ " configBlock: |-\n"
+ " lameduck 5s\n"
+ " - name: ready\n"
+ " - name: prometheus\n"
+ " parameters: 0.0.0.0:9153\n"
+ " - name: forward\n"
+ " parameters: . /etc/resolv.conf\n"
+ " - name: loop\n"
+ " - name: reload\n"
+ " - name: loadbalance\n"
+ " - name: hosts\n"
+ " configBlock: |-\n"
+ " {{- $display_external_ip := .Values.display_external_ip }}\n"
+ " {{- range $name,$domain := .Values.external_hostnames }}\n"
+ " {{ $display_external_ip }} {{ $domain }}\n"
+ " {{- end }}\n"
+ " fallthrough\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - patch: |\n"
+ " kind: Service\n"
+ " apiVersion: v1\n"
+ " metadata:\n"
+ " name: k8s-gateway\n"
+ " spec:\n"
+ " type: LoadBalancer\n"
+ " loadBalancerClass: '{{ .Values._internal.loadBalancerClass }}'\n"
+ " - patch: |\n"
+ " kind: Deployment\n"
+ " apiVersion: apps/v1\n"
+ " metadata:\n"
+ " name: k8s-gateway\n"
+ " spec:\n"
+ " template:\n"
+ " spec:\n"
+ " containers:\n"
+ " - name: k8s-gateway\n"
+ " securityContext:\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " runAsNonRoot: true\n"
+ " runAsGroup: 1000\n"
+ " runAsUser: 1000\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n\n"
+ " cluster-vip:\n"
+ " info:\n"
+ " description: Defines the cluster-vip Service for MetalLB load-balancing\n"
+ " details:\n"
+ " MetalLB will only handle the VIP if it has a corresponding service with endpoints,\n"
+ " but we don't want that the API access (6443) relies on kube-proxy, because on RKE2 agent nodes,\n"
+ " kube-proxy uses RKE2 internal load-balancing proxy that may fall-back to the VIP to access the API,\n"
+ " which could create a deadlock if endpoints are not up-to-date.\n\n"
+ " The cluster-vip Service that plays this role. This unit manages this resource, taking over\n"
+ " the control after the initial creation of this Service by a cloud-init post command on the first node).\n"
+ " internal: true\n"
+ " labels:\n"
+ " sylva-units/protected: \"\"\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - kube-job\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"metallb\" | include \"unit-enabled\" }}'\n"
+ " - '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/cluster-vip\n"
+ " wait: true\n"
+ " prune: false\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " # in the general case, the cluster VIP is taken from .Values.cluster_virtual_ip\n"
+ " # except in the CAPO case where we don't know it beforehand in values and we take it\n"
+ " # from capo-cluster-resources ConfigMap (see substituteFrom below)\n"
+ " allocated_ip: '{{ tuple .Values.cluster_virtual_ip (not (.Values.cluster.capi_providers.infra_provider | eq \"capo\")) | include \"set-only-if\" }}'\n"
+ " lb_class: sylva.org/metallb-class\n"
+ " substituteFrom:\n"
+ " # see explanation above about allocated_ip / cluster_virtual_ip in CAPO case\n"
+ " - kind: ConfigMap\n"
+ " name: capo-cluster-resources\n"
+ " optional: '{{ not (.Values.cluster.capi_providers.infra_provider | eq \"capo\") | include \"as-bool\" }}'\n\n"
+ " rancher-turtles:\n"
+ " info:\n"
+ " description: installs the Rancher Turtles operator, which enables the import of Cluster API workload clusters into the management cluster's Rancher\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"rancher\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " rancher: true\n"
+ " k8s-gateway: true\n"
+ " helm_repo_url: https://rancher.github.io/turtles\n"
+ " helmrelease_spec:\n"
+ " install:\n"
+ " disableHooks: true\n"
+ " driftDetection:\n"
+ " ignore:\n"
+ " - target:\n"
+ " kind: ClusterRole\n"
+ " name: rancher-turtles-aggregated-manager-role\n"
+ " paths:\n"
+ " - /rules\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: rancher-turtles\n"
+ " version: 0.22.0\n"
+ " targetNamespace: rancher-turtles-system\n"
+ " values:\n"
+ " rancherTurtles:\n"
+ " features:\n"
+ " agent-tls-mode:\n"
+ " enabled: true\n"
+ " cluster-api-operator:\n"
+ " enabled: false\n"
+ " cleanup: false\n"
+ " cluster-api:\n"
+ " enabled: false\n\n"
+ " ingress-nginx-init:\n"
+ " info:\n"
+ " description: creates the default certificate for the ingress-nginx controller\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"ingress-nginx\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " sylva-ca: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/ingress-nginx-init\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " CLUSTER_IP: \"{{ .Values.display_external_ip }}\"\n"
+ " healthChecks:\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: default-nginx-tls\n"
+ " namespace: kube-system\n\n"
+ " ingress-nginx:\n"
+ " info:\n"
+ " description: installs Nginx ingress controller\n"
+ " maturity: stable\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " annotations:\n"
+ " sylvactl/unitTimeout: '{{ printf \"%dm\" ( include \"cluster-unit-timeout\" . | int) }}'\n"
+ " depends_on:\n"
+ " ingress-nginx-init: '{{ tuple . \"ingress-nginx-init\" | include \"unit-enabled\" }}'\n"
+ " metallb-resources: '{{ tuple . \"metallb\" | include \"unit-enabled\" }}'\n"
+ " '{{ .Values._internal.calico_readiness_unit }}': '{{ tuple . \"calico\" | include \"unit-enabled\" }}'\n"
+ " helm_repo_url: https://rke2-charts.rancher.io\n"
+ " helm_chart_versions:\n"
+ " 4.12.401: '{{ include \"k8s-version-match\" (tuple \">=1.30.0,<1.31.0\" .Values._internal.k8s_version) }}'\n"
+ " 4.12.600: '{{ include \"k8s-version-match\" (tuple \">=1.31.0\" .Values._internal.k8s_version) }}'\n"
+ " helmrelease_spec:\n"
+ " releaseName: rke2-ingress-nginx\n"
+ " targetNamespace: kube-system\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: rke2-ingress-nginx\n"
+ " version: \"\" # will be defined by helm_chart_versions\n"
+ " driftDetection:\n"
+ " mode: enabled\n"
+ " upgrade:\n"
+ " # Increase upgrade timeout as ingress-nginx pods have a longer terminationGracePeriodSeconds,\n"
+ " # with a special case for pods that were deployed with a previous version of sylva where it was set to 300s\n"
+ " timeout: >-\n"
+ " {{- $nginxDs := lookup \"apps/v1\" \"DaemonSet\" \"kube-system\" \"rke2-ingress-nginx-controller\" -}}\n"
+ " {{- if eq ($nginxDs | dig \"spec\" \"template\" \"spec\" \"terminationGracePeriodSeconds\" 90) 300 -}}\n"
+ " {{- print (mul 10 ($nginxDs | dig \"status\" \"desiredNumberScheduled\" 3)) \"m\" -}}\n"
+ " {{- else -}}\n"
+ " 30m\n"
+ " {{- end -}}\n"
+ " force: true\n"
+ " values:\n"
+ " fullnameOverride: rke2-ingress-nginx\n"
+ " controller:\n"
+ " # Decrease default terminationGracePeriodSeconds (of 300s) to fasten upgrades, and tune worker-shutdown-timeout accordingly (default is 240s)\n"
+ " terminationGracePeriodSeconds: 90\n"
+ " config:\n"
+ " worker-processes: 8\n"
+ " worker-shutdown-timeout: 60s\n"
+ " use-forwarded-headers: true\n"
+ " large-client-header-buffers: \"4 16k\"\n"
+ " # Install controllers only on control plane nodes, otherwise it may take a while to upgrade them successively on clusters with lot of nodes\n"
+ " nodeSelector:\n"
+ " node-role.kubernetes.io/control-plane: '{{ .Values._internal.cp_node_label_value }}'\n"
+ " kind: DaemonSet\n"
+ " service:\n"
+ " loadBalancerClass: '{{ .Values._internal.loadBalancerClass }}'\n"
+ " enabled: true\n"
+ " annotations: '{{ .Values._internal.lb_service_annotations | default dict | include \"preserve-type\" }}'\n"
+ " publishService:\n"
+ " enabled: true\n"
+ " ingressClassResource:\n"
+ " default: true\n"
+ " hostPort:\n"
+ " enabled: false\n"
+ " resources:\n"
+ " requests:\n"
+ " memory: \"300Mi\"\n"
+ " limits:\n"
+ " memory: \"1Gi\"\n"
+ " extraArgs:\n"
+ " default-ssl-certificate: \"kube-system/default-nginx-tls\"\n\n"
+ " kustomization_spec:\n"
+ " healthChecks:\n"
+ " - apiVersion: apps/v1\n"
+ " kind: DaemonSet\n"
+ " name: rke2-ingress-nginx-controller\n"
+ " namespace: kube-system\n\n"
+ " first-login-rancher:\n"
+ " info:\n"
+ " description: configure Rancher authentication for admin\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"rancher\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " rancher: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/first-login-rancher\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " RANCHER_EXTERNAL_URL: '{{ .Values.external_hostnames.rancher }}'\n"
+ " CURRENT_TIME: '{{ now | date \"2006-01-02T15:04:05.999Z\" }}'\n"
+ " wait: true\n\n"
+ " flux-webui-init:\n"
+ " info:\n"
+ " description: initializes and configures flux-webui\n"
+ " maturity: beta\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"flux-webui\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " ingress-nginx: '{{ tuple . \"ingress-nginx\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " KEY: '{{ .Values.external_certificates.flux.key | default \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/flux-webui-init\n"
+ " targetNamespace: flux-system\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " SERVICE: flux-webui\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.flux }}'\n"
+ " CERT: '{{ .Values.external_certificates.flux.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " _components:\n"
+ " - '{{ ternary \"../tls-components/tls-secret\" \"../tls-components/tls-certificate\" (hasKey .Values.external_certificates.flux \"cert\") }}'\n"
+ " - \"../tls-components/sylva-ca\"\n"
+ " healthChecks:\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: flux-webui-tls\n"
+ " namespace: flux-system\n\n"
+ " flux-webui:\n"
+ " info:\n"
+ " description: installs Weave GitOps Flux web GUI\n"
+ " maturity: stable\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " flux-system: true\n"
+ " ingress-nginx: '{{ tuple . \"ingress-nginx\" | include \"unit-enabled\" }}'\n"
+ " coredns-config: '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}' # see https://gitlab.com/sylva-projects/sylva-core/-/merge_requests/1023#note_1694289969\n"
+ " keycloak-add-client-scope: '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " keycloak-oidc-external-secrets: '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " flux-webui-init: true\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"Flux Web UI can be reached at https://{{ .Values.external_hostnames.flux }} ({{ .Values._internal.display_external_ip_msg }})\"\n"
+ " repo: weave-gitops\n"
+ " helm_chart_artifact_name: weave-gitops\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: charts/gitops-server\n"
+ " targetNamespace: flux-system\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " upgrade:\n"
+ " force: true\n"
+ " values:\n"
+ " logLevel: info\n"
+ " envVars:\n"
+ " - name: WEAVE_GITOPS_FEATURE_TENANCY\n"
+ " value: \"true\"\n"
+ " - name: WEAVE_GITOPS_FEATURE_CLUSTER\n"
+ " value: \"false\"\n"
+ " - name: WEAVE_GITOPS_FEATURE_OIDC_BUTTON_LABEL\n"
+ " value: \"Log in with Keycloak\"\n"
+ " installCRDs: true\n"
+ " podSecurityContext:\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " privileged: false\n"
+ " readOnlyRootFilesystem: true\n"
+ " runAsNonRoot: true\n"
+ " runAsGroup: 1000\n"
+ " runAsUser: 1000\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " adminUser:\n"
+ " create: true\n"
+ " username: '{{ .Values.flux_webui.admin_user }}'\n"
+ " createSecret: false\n"
+ " rbac:\n"
+ " impersonationResourceNames: [\"admin\", \"sylva-admin@example.com\"] # the Keycloak username set in unit keycloak-resources; cannot use \"infra-admins\" group here, see https://gitlab.com/sylva-projects/sylva-core/-/issues/427\n"
+ " additionalRules:\n"
+ " - apiGroups: [\"*\"]\n"
+ " resources: [\"*\"]\n"
+ " verbs: [ \"get\", \"list\", \"watch\" ]\n"
+ " ingress:\n"
+ " enabled: true\n"
+ " className: nginx\n"
+ " hosts:\n"
+ " - host: '{{ .Values.external_hostnames.flux }}'\n"
+ " paths:\n"
+ " - path: / # setting this to another value like '/flux-webui' does not work (URLs coming back from flux webui aren't rewritten by nginx)\n"
+ " pathType: Prefix\n"
+ " tls:\n"
+ " - secretName: flux-webui-tls\n"
+ " hosts:\n"
+ " - '{{ .Values.external_hostnames.flux }}'\n"
+ " extraVolumes:\n"
+ " - name: custom-ca-cert\n"
+ " secret:\n"
+ " secretName: sylva-ca.crt\n"
+ " items:\n"
+ " - key: ca.crt\n"
+ " path: ca.crt\n"
+ " extraVolumeMounts:\n"
+ " - name: custom-ca-cert\n"
+ " mountPath: /etc/ssl/certs\n"
+ " readOnly: true\n"
+ " oidcSecret:\n"
+ " create: false\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - patch: |-\n"
+ " kind: ClusterRoleBinding\n"
+ " apiVersion: rbac.authorization.k8s.io/v1\n"
+ " metadata:\n"
+ " name: '{{ .Values.flux_webui.admin_user }}-user-read-resources-cr'\n"
+ " subjects:\n"
+ " - apiGroup: rbac.authorization.k8s.io\n"
+ " kind: User\n"
+ " name: '{{ .Values.flux_webui.admin_user }}'\n"
+ " - apiGroup: rbac.authorization.k8s.io\n"
+ " kind: User\n"
+ " name: sylva-admin@example.com # add same RBAC for the SSO user, so that when flux-webui SA impersonates it has privileges; cannot use \"infra-admins\" group here, see https://gitlab.com/sylva-projects/sylva-core/-/issues/427\n\n"
+ " prometheus-custom-metrics:\n"
+ " info:\n"
+ " description: Prometheus configuration for custom resource metrics\n"
+ " details: Adding podmonitors for flux controllers and create custom metrics for various resources by configuring kube-state-metrics\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " repo: sylva-core\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/prometheus-custom-metrics\n"
+ " wait: false\n"
+ " force: true\n\n"
+ " monitoring-crd:\n"
+ " info:\n"
+ " description: installs monitoring stack CRDs\n"
+ " maturity: stable\n"
+ " hidden: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " # this unit provides the monitoring CRDs which Kyverno unit consumes\n"
+ " # so it cannot depend on kyverno\n"
+ " kyverno: false\n"
+ " kyverno-policies: false\n"
+ " namespace-defs: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " helm_repo_url: https://charts.rancher.io/\n"
+ " helmrelease_spec:\n"
+ " releaseName: rancher-monitoring-crd\n"
+ " targetNamespace: cattle-monitoring-system\n"
+ " storageNamespace: cattle-monitoring-system # see https://gitlab.com/sylva-projects/sylva-core/-/issues/443\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: rancher-monitoring-crd\n"
+ " version: 106.1.2+up69.8.2-rancher.7\n"
+ " kustomization_spec:\n"
+ " prune: false\n\n"
+ " grafana-init:\n"
+ " info:\n"
+ " description: sets up Grafana certificate for Keycloak OIDC integration\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " - '{{ tuple . \"keycloak\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/dummy/base\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " SERVICE: grafana\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.grafana }}'\n"
+ " CERTIFICATE_NAMESPACE: cattle-monitoring-system\n"
+ " CERT: '{{ .Values.external_certificates.grafana.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " _components:\n"
+ " - '{{ ternary \"../../tls-components/tls-secret\" \"../../tls-components/tls-certificate\" (hasKey .Values.external_certificates.grafana \"cert\") }}'\n"
+ " - \"../../tls-components/sylva-ca\"\n"
+ " healthChecks:\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: sylva-ca.crt\n"
+ " namespace: cattle-monitoring-system\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: grafana-tls\n"
+ " namespace: cattle-monitoring-system\n\n"
+ " monitoring:\n"
+ " info:\n"
+ " description: installs monitoring stack\n"
+ " maturity: stable\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " monitoring-crd: true\n"
+ " prometheus-resources: '{{ tuple . \"prometheus-resources\" | include \"unit-enabled\" }}'\n"
+ " annotations:\n"
+ " sylvactl/unitTimeout: \"{{ printf \\\"%dm\\\" (mul 5 .Values.cluster.control_plane_replicas) }}\"\n"
+ " helm_repo_url: https://charts.rancher.io/\n"
+ " helmrelease_spec:\n"
+ " driftDetection:\n"
+ " ignore:\n"
+ " - target:\n"
+ " # The Prometheus Operator annotates 'PrometheusRule' resources with 'prometheus-operator-validated: true'\n"
+ " # after validation checks. This occurs only at apply time, so drift detection should ignore it.\n"
+ " kind: PrometheusRule\n"
+ " paths:\n"
+ " - /metadata/annotations/prometheus-operator-validated\n"
+ " - target:\n"
+ " # The certgen webhook injects the caBundle at runtime, so drift detection should ignore it.\n"
+ " kind: ValidatingWebhookConfiguration\n"
+ " paths:\n"
+ " - /webhooks[0]/clientConfig/caBundle\n"
+ " - target:\n"
+ " # The certgen webhook injects the caBundle at runtime, so drift detection should ignore it.\n"
+ " kind: MutatingWebhookConfiguration\n"
+ " paths:\n"
+ " - /webhooks[0]/clientConfig/caBundle\n"
+ " releaseName: rancher-monitoring\n"
+ " targetNamespace: cattle-monitoring-system\n"
+ " storageNamespace: cattle-monitoring-system # see https://gitlab.com/sylva-projects/sylva-core/-/issues/443\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: rancher-monitoring\n"
+ " version: 106.1.2+up69.8.2-rancher.7\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " values:\n"
+ " kube-state-metrics:\n"
+ " extraArgs:\n"
+ " - --metric-labels-allowlist=namespaces=[field.cattle.io/projectId]\n"
+ " grafana:\n"
+ " sidecar:\n"
+ " dashboards:\n"
+ " enabled: true\n"
+ " searchNamespace: ALL\n"
+ " multicluster:\n"
+ " global:\n"
+ " enabled: true\n"
+ " etcd:\n"
+ " enabled: true\n"
+ " prometheus-adapter:\n"
+ " enabled: false\n"
+ " prometheus:\n"
+ " prometheusSpec:\n"
+ " scrapeInterval: \"60s\"\n"
+ " scrapeTimeout: \"30s\"\n"
+ " evaluationInterval: \"30s\"\n"
+ " resources:\n"
+ " limits:\n"
+ " memory: 6Gi\n"
+ " requests:\n"
+ " memory: 3Gi\n"
+ " storageSpec:\n"
+ " volumeClaimTemplate:\n"
+ " spec:\n"
+ " storageClassName: '{{ tuple . (tuple . \"two-replicas-storageclass\" | include \"unit-enabled\") \"two-replicas-storageclass\" .Values._internal.default_storage_class | include \"interpret-ternary\" }}'\n"
+ " accessModes: [\"ReadWriteOnce\"]\n"
+ " resources:\n"
+ " requests:\n"
+ " # default storage is 50Gi, except for CI and dev environments, 8Gi\n"
+ " storage: '{{ has .Values.env_type (list \"dev\" \"ci\") | ternary \"8Gi\" \"50Gi\" }}'\n"
+ " externalLabels:\n"
+ " cluster: '{{ .Values.cluster.name }}' # required for multi-cluster dashboards\n"
+ " platform_tag: '{{ .Values.monitoring.platform_tag }}'\n"
+ " remoteWriteDashboards: true\n"
+ " remoteWrite:\n"
+ " - url: '{{ .Values.monitoring.thanos.receive_url }}'\n"
+ " name: '{{ .Values.cluster.name }}'\n"
+ " basicAuth:\n"
+ " username:\n"
+ " name: thanos-basic-auth\n"
+ " key: username\n"
+ " password:\n"
+ " name: thanos-basic-auth\n"
+ " key: password\n"
+ " tlsConfig:\n"
+ " insecureSkipVerify: true\n"
+ " queueConfig:\n"
+ " batchSendDeadline: 5s\n"
+ " minBackoff: 1s\n"
+ " maxBackoff: 30s\n"
+ " alertmanager:\n"
+ " alertmanagerSpec:\n"
+ " useExistingSecret: true\n"
+ " configSecret: sylva-alertmanager-config # this Secret is a byproduct of the alertmanager-config unit\n"
+ " replicas: '{{ .Values._internal.ha_cluster.is_ha | ternary 2 1 }}'\n"
+ " storage:\n"
+ " volumeClaimTemplate:\n"
+ " spec:\n"
+ " storageClassName: '{{ tuple . (tuple . \"two-replicas-storageclass\" | include \"unit-enabled\") \"two-replicas-storageclass\" .Values._internal.default_storage_class | include \"interpret-ternary\" }}'\n"
+ " accessModes: [\"ReadWriteOnce\"]\n"
+ " resources:\n"
+ " requests:\n"
+ " # default storage is 2Gi, except for CI and dev environments, 1Gi\n"
+ " storage: '{{ has .Values.env_type (list \"dev\" \"ci\") | ternary \"1Gi\" \"2Gi\" }}'\n"
+ " podAntiAffinity: hard #the scheduler is *required* to not schedule two replica pods onto the same node\n"
+ " podDisruptionBudget:\n"
+ " enabled: true\n"
+ " minAvailable: '{{ .Values._internal.ha_cluster.is_ha | ternary 1 0 }}'\n"
+ " valuesFrom:\n"
+ " - kind: ConfigMap\n"
+ " name: custom-resource-state-config # this configmap is a byproduct of the prometheus-custom-metrics unit\n"
+ " optional: '{{ not (tuple . \"prometheus-custom-metrics\" | include \"unit-enabled\") | include \"as-bool\" }}'\n"
+ " - kind: ConfigMap\n"
+ " name: prometheus-config-values # this configmap is a byproduct of the prometheus-resources unit\n"
+ " optional: '{{ not (tuple . \"prometheus-resources\" | include \"unit-enabled\") | include \"as-bool\" }}'\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - '{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include \"preserve-type\" }}'\n"
+ " helm_secret_values:\n"
+ " grafana:\n"
+ " adminPassword: '{{ .Values._internal.grafana_admin_password }}'\n"
+ " prometheus:\n"
+ " extraSecret:\n"
+ " name: thanos-basic-auth\n"
+ " data:\n"
+ " username: '{{ .Values._internal.thanos_user }}'\n"
+ " password: '{{ .Values._internal.thanos_password }}'\n\n"
+ " goldpinger-init:\n"
+ " info:\n"
+ " description: sets up Goldpinger prerequisites\n"
+ " details: it generates tls secret for goldpinger\n"
+ " internal: true\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"goldpinger\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " KEY: '{{ .Values.external_certificates.goldpinger.key | default \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/goldpinger-init\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " SERVICE: goldpinger\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.goldpinger }}'\n"
+ " CERTIFICATE_NAMESPACE: goldpinger\n"
+ " CERT: '{{ .Values.external_certificates.goldpinger.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " _components:\n"
+ " - '{{ ternary \"../tls-components/tls-secret\" \"../tls-components/tls-certificate\" (hasKey .Values.external_certificates.goldpinger \"cert\") }}'\n"
+ " - \"../tls-components/sylva-ca\"\n\n"
+ " goldpinger:\n"
+ " info:\n"
+ " description: installs Goldpinger for pod-to-pod network observability\n"
+ " maturity: experimental\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled: '{{ has .Values.env_type (list \"dev\" \"ci\") }}'\n"
+ " depends_on:\n"
+ " goldpinger-init: '{{ tuple . \"goldpinger-init\" | include \"unit-enabled\" }}'\n"
+ " namespace-defs: true\n"
+ " prometheus-resources: '{{ tuple . \"prometheus-resources\" | include \"unit-enabled\" }}'\n"
+ " openshift-security-context-constraints: '{{ eq .Values.cluster.capi_providers.bootstrap_provider \"cabpoa\" }}'\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"Goldpinger UI can be reached at https://{{ .Values.external_hostnames.goldpinger }} ({{ .Values._internal.display_external_ip_msg }})\"\n"
+ " helm_repo_url: https://bloomberg.github.io/goldpinger/\n"
+ " helmrelease_spec:\n"
+ " releaseName: goldpinger\n"
+ " targetNamespace: goldpinger\n"
+ " storageNamespace: goldpinger\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: goldpinger\n"
+ " version: 1.0.1\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " values:\n"
+ " service:\n"
+ " type: ClusterIP\n"
+ " ingress:\n"
+ " enabled: true\n"
+ " className: nginx\n"
+ " hosts:\n"
+ " - host: \"{{ .Values.external_hostnames.goldpinger }}\"\n"
+ " paths:\n"
+ " - path: /\n"
+ " pathType: Prefix\n"
+ " tls:\n"
+ " - hosts:\n"
+ " - \"{{ .Values.external_hostnames.goldpinger }}\"\n"
+ " secretName: goldpinger-tls\n"
+ " rbac:\n"
+ " create: true\n"
+ " extraEnv:\n"
+ " - name: TCP_TARGETS\n"
+ " value: \"{{ .Values.cluster_virtual_ip }}:6443\"\n"
+ " - name: HTTP_TARGETS\n"
+ " value: \"http://goldpinger.goldpinger.svc.cluster.local:8081\"\n"
+ " serviceMonitor:\n"
+ " enabled: '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " nodeSelector: {}\n"
+ " tolerations: []\n\n"
+ " prometheus-resources:\n"
+ " info:\n"
+ " description: Creates required ConfigMaps and Kyverno policies to enable SNMP monitoring by Prometheus\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " metal3: '{{ tuple . \"metal3\" | include \"unit-enabled\" }}'\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/prometheus-resources\n"
+ " force: true\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " CONFIGMAPS: '\n"
+ " {{- $cm := list -}}\n"
+ " {{- if (tuple . \"snmp-exporter\" | include \"unit-enabled\") -}}\n"
+ " {{- $cm = append $cm \"prometheus-snmp-sd-files\" -}}\n"
+ " {{- $cm = append $cm \"prometheus-snmp-sd-files-bmh\" -}}\n"
+ " {{- end -}}\n"
+ " {{ $cm | toJson }}'\n"
+ " TARGETS: >\n"
+ " {{- $sylva_cluster := .Values.cluster.name -}}\n"
+ " {{- $result := list -}}\n"
+ " {{- range .Values.snmp.devices -}}\n"
+ " {{- $target := dict \"targets\" (list .target) -}}\n"
+ " {{- $_ := set $target \"labels\" (dict \"module\" .module \"auth\" .auth \"alias\" .alias \"cluster_name\" (.cluster_name | default $sylva_cluster)) -}}\n"
+ " {{- $result = append $result $target -}}\n"
+ " {{- end -}}\n"
+ " {{ $result | toJson }}\n"
+ " _components:\n"
+ " - '{{ tuple \"components/kyverno-snmp-bmh-policy\" (tuple . \"metal3\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n\n"
+ " alertmanager-jiralert:\n"
+ " info:\n"
+ " description: installs Alertmanager webhook Jiralert\n"
+ " details: Jiralert is an Alertmanager wehbook that creates Jira issues\n"
+ " maturity: beta\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " - '{{ .Values.monitoring.alertmanager.webhooks.jiralert.config | empty | not }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " alertmanager-jiralert-config: true\n"
+ " helm_repo_url: https://prometheus-community.github.io/helm-charts\n"
+ " helmrelease_spec:\n"
+ " targetNamespace: cattle-monitoring-system\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: jiralert\n"
+ " version: 1.8.1\n"
+ " values:\n"
+ " extraArgs:\n"
+ " - -log.level=info\n"
+ " - -hash-jira-label\n"
+ " existingConfigSecret: sylva-alertmanager-webhook-jiralert # this Secret is a byproduct of the alertmanager-jiralert-config unit\n"
+ " fullnameOverride: 'alertmanager-jiralert'\n"
+ " podAnnotations:\n"
+ " sylva/jiralert-config: '{{ .Values.monitoring.alertmanager.webhooks.jiralert.config | toJson | sha256sum | trunc 8 }}'\n"
+ " env:\n"
+ " http_proxy: '{{ .Values.monitoring.alertmanager.webhooks.jiralert.env.http_proxy | default .Values.proxies.http_proxy }}'\n"
+ " https_proxy: '{{ .Values.monitoring.alertmanager.webhooks.jiralert.env.https_proxy | default .Values.proxies.https_proxy }}'\n"
+ " no_proxy: '{{ .Values.monitoring.alertmanager.webhooks.jiralert.env.no_proxy | default (include \"sylva-units.no_proxy\" (tuple .)) }}'\n"
+ " replicaCount: '{{ .Values._internal.ha_cluster.is_ha | ternary 2 1 | include \"preserve-type\" }}'\n"
+ " podDisruptionBudget:\n"
+ " minAvailable: '{{ .Values._internal.ha_cluster.is_ha | ternary 1 0 | include \"preserve-type\" }}'\n"
+ " unhealthyPodEvictionPolicy: AlwaysAllow\n"
+ " securityContext:\n"
+ " runAsNonRoot: true\n"
+ " allowPrivilegeEscalation: false\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n\n"
+ " alertmanager-jiralert-config:\n"
+ " info:\n"
+ " description: generates the config for Jiralert Alertmanager webhook\n"
+ " maturity: beta\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"alertmanager-jiralert\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " repo: sylva-alertmanager-resources\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: webhooks/jiralert\n"
+ " targetNamespace: cattle-monitoring-system\n"
+ " helm_secret_values:\n"
+ " config: '{{ .Values.monitoring.alertmanager.webhooks.jiralert.config | include \"preserve-type\" }}'\n\n"
+ " alertmanager-snmp-notifier:\n"
+ " info:\n"
+ " description: installs Alertmanager webhook snmp-notifier\n"
+ " details: snmp-notifier is an Alertmanager wehbook that sends alerts as snmp traps\n"
+ " maturity: beta\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " # only enable if SNMP traps destinations are set\n"
+ " - '{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.destinations | empty | not }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " helm_repo_url: https://prometheus-community.github.io/helm-charts\n"
+ " helmrelease_spec:\n"
+ " targetNamespace: cattle-monitoring-system\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: alertmanager-snmp-notifier\n"
+ " version: 2.1.0\n"
+ " values:\n"
+ " snmpNotifier:\n"
+ " extraArgs:\n"
+ " '{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.final_extra_args }}'\n"
+ " snmpDestinations:\n"
+ " '{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.destinations | default list | include \"preserve-type\" }}'\n"
+ " fullnameOverride: 'alertmanager-snmp-notifier'\n"
+ " extraConfigmapMounts:\n"
+ " - name: snmp-notifier-templates\n"
+ " configMap: sylva-alertmanager-webhook-snmp-notifier # this ConfigMap is a byproduct of the alertmanager-snmp-notifier-config unit\n"
+ " mountPath: /etc/snmp_notifier/\n"
+ " readOnly: true\n"
+ " podAnnotations:\n"
+ " sylva/snmp-notifier-config: '{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config | toJson | sha256sum | trunc 8 }}'\n"
+ " replicaCount: '{{ .Values._internal.ha_cluster.is_ha | ternary 2 1 | include \"preserve-type\" }}'\n"
+ " podDisruptionBudget:\n"
+ " minAvailable: '{{ .Values._internal.ha_cluster.is_ha | ternary 1 0 | include \"preserve-type\" }}'\n"
+ " unhealthyPodEvictionPolicy: AlwaysAllow\n"
+ " securityContext:\n"
+ " runAsNonRoot: true\n"
+ " runAsUser: 65535\n"
+ " allowPrivilegeEscalation: false\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " resources:\n"
+ " limits:\n"
+ " cpu: \"0.2\"\n"
+ " memory: \"128Mi\"\n"
+ " requests:\n"
+ " cpu: \"0.1\"\n"
+ " memory: \"64Mi\"\n"
+ " helm_secret_values:\n"
+ " snmpNotifier:\n"
+ " snmpCommunity: \"{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.auth.community }}\"\n"
+ " snmpAuthenticationUsername: \"{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.auth.username }}\"\n"
+ " snmpAuthenticationPassword: \"{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.auth.password }}\"\n"
+ " snmpPrivatePassword: \"{{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.auth.priv_password }}\"\n\n"
+ " alertmanager-snmp-notifier-config:\n"
+ " info:\n"
+ " description: generates the config for snmp-notifier Alertmanager webhook\n"
+ " maturity: beta\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"alertmanager-snmp-notifier\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " repo: sylva-alertmanager-resources\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: webhooks/snmp-notifier\n"
+ " targetNamespace: cattle-monitoring-system\n\n"
+ " alertmanager-config:\n"
+ " info:\n"
+ " description: generates the config for Alertmanager\n"
+ " maturity: beta\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " - '{{ .Values.units.monitoring.helmrelease_spec.values.alertmanager.alertmanagerSpec.useExistingSecret }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " repo: sylva-alertmanager-resources\n"
+ " helm_chart_artifact_name: alertmanager-config\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: config\n"
+ " targetNamespace: cattle-monitoring-system\n"
+ " helm_secret_values:\n"
+ " config: '{{ .Values.monitoring.alertmanager.config | include \"preserve-type\" }}'\n\n"
+ " prometheus-pushgateway:\n"
+ " info:\n"
+ " description: installs Prometheus Push-gateway exporter\n"
+ " maturity: beta\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " monitoring: true\n"
+ " helm_repo_url: https://prometheus-community.github.io/helm-charts\n"
+ " helmrelease_spec:\n"
+ " releaseName: prometheus-pushgateway\n"
+ " targetNamespace: pushgateway\n"
+ " storageNamespace: pushgateway\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: prometheus-pushgateway\n"
+ " version: 3.4.1\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " values:\n"
+ " persistentVolume:\n"
+ " enabled: true\n"
+ " securityContext:\n"
+ " runAsNonRoot: true\n"
+ " allowPrivilegeEscalation: false\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " containerSecurityContext:\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " extraArgs:\n"
+ " - --persistence.file=/data/pushgateway.data\n"
+ " - --persistence.interval=5m\n"
+ " extraContainers:\n"
+ " - name: prometheus-pushgateway-metrics-purger\n"
+ " image: curlimages/curl:8.15.0\n"
+ " command:\n"
+ " - sh\n"
+ " - -c\n"
+ " - |\n"
+ " while true\n"
+ " do\n"
+ " del_req=\"curl -X DELETE http://localhost:9091/metrics/job/\"\n"
+ " curl -s http://localhost:9091/metrics | \\\n"
+ " grep push_time_seconds | \\\n"
+ " grep -Ev '^#' | \\\n"
+ " while read line\n"
+ " do\n"
+ " last_pushed=$(printf \"%.f\" `echo $line | awk '{print $2}'`)\n"
+ " job_name=$(echo $line | \\\n"
+ " awk -F '}' '{print $1}' | \\\n"
+ " grep -o 'job=.*' | \\\n"
+ " cut -f1 -d ',' | \\\n"
+ " cut -f2 -d'=' | \\\n"
+ " tr -d '\"')\n"
+ " std_unix_time_now=$(date +%s)\n"
+ " interval_seconds=$((std_unix_time_now - last_pushed))\n"
+ " [ $interval_seconds -gt 180 ] \\\n"
+ " && eval $del_req$job_name && echo \"$(date), Deleted job group - $job_name\" \\\n"
+ " || echo \"$(date), Purge action skipped. Interval not satisfied\" # adjust interval_seconds as per requirement\n"
+ " done\n"
+ " sleep 3600\n"
+ " done\n"
+ " securityContext:\n"
+ " allowPrivilegeEscalation: false\n"
+ " runAsNonRoot: true\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n\n"
+ " ephemeral-storage-exporter:\n"
+ " info:\n"
+ " description: installs exporter for extracting ephemeral storage metrics from a Kubernetes cluster\n"
+ " maturity: beta\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " monitoring: true\n"
+ " helm_repo_url: https://jmcgrath207.github.io/k8s-ephemeral-storage-metrics/chart\n"
+ " helmrelease_spec:\n"
+ " releaseName: ephemeral-storage-exporter\n"
+ " targetNamespace: cattle-monitoring-system\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: k8s-ephemeral-storage-metrics\n"
+ " version: 1.18.2\n"
+ " values:\n"
+ " fullnameOverride: 'ephemeral-storage-exporter'\n"
+ " replicas: 1\n"
+ " log_level: info\n"
+ " serviceMonitor:\n"
+ " enable: true\n"
+ " prometheus:\n"
+ " enable: true\n"
+ " securityContext:\n"
+ " runAsNonRoot: true\n"
+ " allowPrivilegeEscalation: false\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " containerSecurityContext:\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n\n"
+ " kepler:\n"
+ " info:\n"
+ " description: installs Kepler (Kubernetes-based Efficient Power Level Exporter) exporter for Prometheus\n"
+ " maturity: stable\n"
+ " enabled: false\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " monitoring-crd: true\n"
+ " helm_repo_url: https://sustainable-computing-io.github.io/kepler-helm-chart\n"
+ " helmrelease_spec:\n"
+ " releaseName: kepler\n"
+ " targetNamespace: kepler\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: kepler\n"
+ " version: 0.6.1\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " values:\n"
+ " extraEnvVars:\n"
+ " # set the Prometheus scrape interval for the kepler ServiceMonitor\n"
+ " PROMETHEUS_SCRAPE_INTERVAL: 30s\n"
+ " serviceMonitor:\n"
+ " enabled: '{{ .Values._internal.monitoring.enabled | include \"preserve-type\" }}'\n"
+ " labels:\n"
+ " release: rancher-monitoring\n"
+ " redfish:\n"
+ " enabled: false\n\n"
+ " snmp-exporter:\n"
+ " info:\n"
+ " description: installs SNMP exporter\n"
+ " maturity: beta\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " - '{{ .Values.snmp.auth | empty | not }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " monitoring: true\n"
+ " snmp-exporter-config: true\n"
+ " helm_repo_url: https://prometheus-community.github.io/helm-charts\n"
+ " helmrelease_spec:\n"
+ " releaseName: snmp-exporter\n"
+ " targetNamespace: snmp-exporter\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: prometheus-snmp-exporter\n"
+ " version: 9.6.2\n"
+ " values:\n"
+ " fullnameOverride: 'snmp-exporter'\n"
+ " replicas: 1\n"
+ " extraArgs:\n"
+ " - \"--config.file=/config/snmp.yaml\"\n"
+ " podAnnotations:\n"
+ " sylva/snmp-values: '{{ .Values.snmp | toJson | sha256sum | trunc 8 }}'\n"
+ " serviceMonitor:\n"
+ " enabled: false\n"
+ " securityContext:\n"
+ " runAsNonRoot: true\n"
+ " allowPrivilegeEscalation: false\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " containerSecurityContext:\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " extraConfigmapMounts: # this ConfigMap is a byproduct of the snmp-exporter-config unit\n"
+ " - name: snmp-config\n"
+ " mountPath: /config\n"
+ " configMap: snmp-exporter-config\n\n"
+ " snmp-exporter-config:\n"
+ " info:\n"
+ " description: contains OID files and generates configuration needed by the snmp-exporter\n"
+ " maturity: beta\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"snmp-exporter\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " repo: sylva-snmp-resources\n"
+ " helm_chart_artifact_name: sylva-snmp-resources\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: .\n"
+ " targetNamespace: snmp-exporter\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " helm_secret_values:\n"
+ " auth: '{{ .Values.snmp.auth | include \"preserve-type\" }}'\n\n"
+ " sylva-dashboards:\n"
+ " info:\n"
+ " description: adds Sylva-specific Grafana dashboards\n"
+ " maturity: beta\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " monitoring: true\n"
+ " repo: sylva-dashboards\n"
+ " helm_chart_artifact_name: sylva-dashboards\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: .\n"
+ " targetNamespace: sylva-dashboards\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " values:\n"
+ " namespace: sylva-dashboards\n"
+ " optional_dashboards: '{{ .Values._internal.monitoring.conditionals | include \"preserve-type\" }}'\n\n"
+ " multus-init:\n"
+ " info:\n"
+ " description: reconfigure Calico to prevent it from installing some CNI binaries concurrently with Multus\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"kyverno\" | include \"unit-enabled\" }}'\n"
+ " - '{{ tuple . \"calico\" | include \"unit-enabled\" }}'\n"
+ " - '{{ tuple . \"multus\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " kyverno: true\n"
+ " calico: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kyverno-policies/skip-calico-cni\n"
+ " wait: true\n\n"
+ " multus:\n"
+ " info:\n"
+ " description: installs Multus\n"
+ " maturity: stable\n"
+ " enabled: false\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " '{{ .Values._internal.calico_readiness_unit }}': '{{ tuple . \"calico\" | include \"unit-enabled\" }}'\n"
+ " multus-init: '{{ tuple . \"multus-init\" | include \"unit-enabled\" }}'\n"
+ " kustomization_spec:\n"
+ " prune: false # This prevents the deletion of Multus resources when the unit is disabled after an initial deployment\n"
+ " helm_repo_url: https://rke2-charts.rancher.io/\n"
+ " helm_chart_versions:\n"
+ " v4.2.106: '{{ include \"k8s-version-match\" (tuple \">=1.30.0,<1.31.0\" .Values._internal.k8s_version) }}'\n"
+ " v4.2.205: '{{ include \"k8s-version-match\" (tuple \">=1.31.0\" .Values._internal.k8s_version) }}'\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: rke2-multus\n"
+ " version: \"\" # will be defined by helm_chart_versions\n"
+ " targetNamespace: kube-system\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " values:\n"
+ " rke2-whereabouts:\n"
+ " enabled: true\n"
+ " tolerations: []\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - patch: |-\n"
+ " kind: DaemonSet\n"
+ " apiVersion: apps/v1\n"
+ " metadata:\n"
+ " name: multus-rke2-whereabouts\n"
+ " namespace: kube-system\n"
+ " labels:\n"
+ " $patch: delete\n"
+ " app: whereabouts\n"
+ " - patch: |-\n"
+ " kind: ServiceAccount\n"
+ " apiVersion: v1\n"
+ " metadata:\n"
+ " name: multus-rke2-whereabouts\n"
+ " namespace: kube-system\n"
+ " labels:\n"
+ " $patch: delete\n"
+ " app: whereabouts\n"
+ " - patch: |-\n"
+ " kind: DaemonSet\n"
+ " apiVersion: apps/v1\n"
+ " metadata:\n"
+ " name: multus\n"
+ " namespace: kube-system\n"
+ " spec:\n"
+ " template:\n"
+ " metadata:\n"
+ " annotations:\n"
+ " sylva/force-reinstall-cni: done\n\n"
+ " multus-cleanup:\n"
+ " info:\n"
+ " description: periodically cleans multus cache\n"
+ " details: >\n"
+ " Multus does not cleans up /var/lib/cni/multus automatically,\n"
+ " leading to inodes starvation on the host file-system.\n"
+ " We need to handle cleanup by ourselves for now.\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"multus\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " multus: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/multus-cleanup\n"
+ " targetNamespace: kube-system\n"
+ " wait: true\n\n"
+ " multus-ready:\n"
+ " info:\n"
+ " description: checks that Multus is ready\n"
+ " details: >\n"
+ " This unit only has dependencies, it does not create resources.\n"
+ " It performs healthchecks outside of the multus unit,\n"
+ " in order to properly target workload cluster when we deploy multus in it.\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - dummy\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"multus\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " multus: true\n"
+ " kustomization_spec:\n"
+ " healthChecks:\n"
+ " - apiVersion: apps/v1\n"
+ " kind: DaemonSet\n"
+ " name: multus\n"
+ " namespace: kube-system\n"
+ " - apiVersion: apps/v1\n"
+ " kind: DaemonSet\n"
+ " name: multus-rke2-whereabouts\n"
+ " namespace: kube-system\n\n"
+ " multus-uninstall-cleanup:\n"
+ " info:\n"
+ " description: deletes Multus resources that are not cleaned up by the uninstall process\n"
+ " details: >\n"
+ " When Multus is uninstalled, some resources remain on the node.\n"
+ " This unit cleans up those resources to ensure a clean state.\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values._internal.state.is_multus_uninstall }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/multus-uninstall-cleanup\n"
+ " wait: true\n\n"
+ " multus-helmrelease-cleanup:\n"
+ " info:\n"
+ " description: removes the Multus HelmRelease resource when the unit is uninstalled\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - kube-job\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"multus-uninstall-cleanup\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " multus-uninstall-cleanup: true\n"
+ " kustomization_spec:\n"
+ " targetNamespace: '{{ .Release.Namespace }}'\n"
+ " _patches:\n"
+ " - '{{ include \"kube-job-add-env-var-patch\" (dict \"RELEASE_NAMESPACE\" .Release.Namespace )}}'\n"
+ " - '{{ include \"kube-job-replace-script-patch\" (.Files.Get \"scripts/multus-delete-helmrelease.sh\") }}'\n\n"
+ " sriov-crd:\n"
+ " info:\n"
+ " description: installs CRDs for SR-IOV Network operator\n"
+ " maturity: stable\n"
+ " hidden: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"sriov-network-operator\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " multus-ready: true\n"
+ " helm_repo_url: https://suse-edge.github.io/charts/\n"
+ " helmrelease_spec:\n"
+ " releaseName: sriov-crd\n"
+ " targetNamespace: cattle-sriov-system\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: sriov-crd\n"
+ " version: 1.5.2+up1.5.0\n"
+ " kustomization_spec:\n"
+ " prune: false\n\n"
+ " sriov-network-operator:\n"
+ " info:\n"
+ " description: installs SR-IOV operator\n"
+ " maturity: stable\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled: '{{ or (tuple . \"sriov\" | include \"unit-enabled\") (not (empty .Values.sriov.node_policies)) }}' # for backward compatibility\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " sriov-crd: true\n"
+ " kyverno: true\n"
+ " helm_repo_url: https://suse-edge.github.io/charts/\n"
+ " helmrelease_spec:\n"
+ " releaseName: sriov\n"
+ " targetNamespace: cattle-sriov-system\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: sriov-network-operator\n"
+ " version: 1.5.2+up1.5.0\n"
+ " values:\n"
+ " operator:\n"
+ " admissionControllers:\n"
+ " enabled: true\n"
+ " certificates:\n"
+ " secretNames:\n"
+ " operator: \"operator-webhook-cert\"\n"
+ " injector: \"network-resources-injector-cert\"\n"
+ " certManager:\n"
+ " enabled: true\n"
+ " generateSelfSigned: true\n"
+ " kustomization_spec:\n"
+ " _components:\n"
+ " - ../kyverno-policies/sriov-network-operator\n\n"
+ " sriov:\n"
+ " info:\n"
+ " description: obsolete - replaced by sriov-network-operator\n"
+ " details: dummy unit which only enables sriov-network-operator for backwark compatibility\n"
+ " internal: true\n"
+ " enabled: false\n"
+ " unit_templates:\n"
+ " - dummy\n\n"
+ " sriov-resources:\n"
+ " info:\n"
+ " description: configures SRIOV resources\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ not (empty .Values.sriov.node_policies) }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " sriov-network-operator: true\n"
+ " repo: sriov-resources\n"
+ " helm_chart_artifact_name: sriov-resources\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: .\n"
+ " targetNamespace: cattle-sriov-system\n"
+ " values:\n"
+ " node_policies: '{{ .Values.sriov.node_policies | include \"preserve-type\" }}'\n\n"
+ " rke2-calico-coredns-cleanup:\n"
+ " info:\n"
+ " description: remove rke2-coredns and rke2-calico-crd HelmChart resources\n"
+ " internal: true\n"
+ " one_shot: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - kube-job\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" }}'\n"
+ " - '{{ .Values._internal.state.is_upgrade }}'\n"
+ " kustomization_spec:\n"
+ " targetNamespace: kube-system\n"
+ " _patches:\n"
+ " - '{{ include \"kube-job-replace-script-patch\" (.Files.Get \"scripts/rke2-calico-coredns-cleanup.sh\") }}'\n\n"
+ " coredns:\n"
+ " info:\n"
+ " description: installs rke2-coredns\n"
+ " labels:\n"
+ " sylva-units/protected: \"\"\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ or (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpk\")\n"
+ " (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\")\n"
+ " }}'\n"
+ " # with kubeadm, this unit is just a plain dummy unit that is here only to ensure\n"
+ " # that on upgrade from Sylva 1.3 the 'coredns' Kustomization (functionally replaced\n"
+ " # by the 'coredns-config' Kustomization) is patched with \"prune: false\" instead\n"
+ " # of being deleted\n"
+ " depends_on:\n"
+ " coredns-config: '{{ tuple . \"coredns-config\" | include \"unit-enabled\" }}'\n"
+ " rke2-calico-coredns-cleanup: '{{ tuple . \"rke2-calico-coredns-cleanup\" | include \"unit-enabled\" }}'\n"
+ " kustomization_spec:\n"
+ " prune: '{{ tuple false ( .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpk\") | include \"set-only-if\" }}'\n"
+ " helm_repo_url: https://rke2-charts.rancher.io\n"
+ " helm_chart_versions:\n"
+ " 1.42.302: >-\n"
+ " {{ include \"k8s-version-match\" (tuple \">=1.30.0,<1.31.0\" .Values._internal.k8s_version) }}\n"
+ " 1.43.302: >-\n"
+ " {{ include \"k8s-version-match\" (tuple \">=1.31.0\" .Values._internal.k8s_version) }}\n"
+ " helmrelease_spec:\n"
+ " suspend: '{{ tuple true ( .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpk\") | include \"set-only-if\" }}'\n"
+ " driftDetection:\n"
+ " ignore:\n"
+ " - target:\n"
+ " # deleting configmap is a desired action, in this way we reconfigure coredns\n"
+ " # in case of okd deployments, drift detection should ignore it\n"
+ " kind: Deployment\n"
+ " name: rke2-coredns-rke2-coredns\n"
+ " paths:\n"
+ " - /spec/template/spec/volumes/0/configMap/items\n"
+ " - target:\n"
+ " kind: Service\n"
+ " name: rke2-coredns-rke2-coredns\n"
+ " paths:\n"
+ " - /spec/ports\n"
+ " releaseName: rke2-coredns\n"
+ " targetNamespace: kube-system\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: rke2-coredns\n"
+ " version: \"\" # will be defined by helm_chart_versions\n"
+ " # by adding option \"skipConfig: true\" into values, the chart will not\n"
+ " # create/control the configMap responsible with the coredns configuration\n"
+ " # and let coredns-config unit to manage it\n"
+ " values: >-\n"
+ " {{ $value := (dict \"deployment\" (dict \"skipConfig\" \"true\")) }}\n"
+ " {{ tuple $value .Values.cluster.coredns.helm_values | include \"merge-append\" }}\n"
+ " {{ $value | include \"preserve-type\" }}\n\n"
+ " coredns-config:\n"
+ " info:\n"
+ " description: configures DNS inside cluster\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " rke2-calico-coredns-cleanup: '{{ tuple . \"rke2-calico-coredns-cleanup\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/coredns\n"
+ " wait: true\n"
+ " _patches:\n"
+ " - target:\n"
+ " kind: ConfigMap\n"
+ " name: coredns\n"
+ " patch: |\n"
+ " - op: replace\n"
+ " path: /metadata/name\n"
+ " value: >-\n"
+ " {{- if eq .Values.cluster.capi_providers.bootstrap_provider \"cabpk\" }}\n"
+ " coredns\n"
+ " {{- else if eq .Values.cluster.capi_providers.bootstrap_provider \"cabpck\" }}\n"
+ " ck-dns-coredns\n"
+ " {{- else }}\n"
+ " rke2-coredns-rke2-coredns\n"
+ " {{- end }}\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " CLUSTER_VIRTUAL_IP: '{{ .Values.cluster_virtual_ip }}'\n"
+ " CLUSTER_DOMAIN: '{{ .Values.cluster_domain }}'\n\n"
+ " nfs-ganesha-init:\n"
+ " info:\n"
+ " description: Define persistent volume claim for NFS Ganesha\n"
+ " maturity: experimental\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"nfs-ganesha\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/nfs-ganesha-init\n"
+ " targetNamespace: nfs-ganesha\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " storage_size: 5Gi\n"
+ " storage_class: '{{ .Values._internal.default_storage_class }}'\n\n"
+ " nfs-ganesha:\n"
+ " info:\n"
+ " description: manages NFS Ganesha CSI provisioner\n"
+ " maturity: experimental\n"
+ " enabled: false\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " nfs-ganesha-init: true\n"
+ " helm_repo_url: https://kubernetes-sigs.github.io/nfs-ganesha-server-and-external-provisioner/\n"
+ " helmrelease_spec:\n"
+ " targetNamespace: nfs-ganesha\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: nfs-server-provisioner\n"
+ " version: 1.8.0\n"
+ " values:\n"
+ " replicaCount: 1\n"
+ " extraArgs:\n"
+ " device-based-fsids: false\n"
+ " persistence:\n"
+ " enabled: true\n"
+ " existingClaim: nfs-ganesha-data-pvc\n"
+ " storageClass:\n"
+ " name: nfs-ganesha\n"
+ " allowVolumeExpansion: true\n"
+ " mountOptions:\n"
+ " - vers=4.1\n"
+ " - retrans=2\n"
+ " - timeo=30\n"
+ " image:\n"
+ " repository: registry.gitlab.com/sylva-projects/sylva-elements/container-images/sandbox-registry/nfs-provisioner\n"
+ " tag: 6.5.custom # built based on https://github.com/kubernetes-sigs/nfs-ganesha-server-and-external-provisioner/pull/152 and embedding https://github.com/nfs-ganesha/ntirpc/tree/a392d47b26e216cbfcc362ed146c94b98894394a (fix from https://github.com/nfs-ganesha/ntirpc/pull/318)\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - target:\n"
+ " kind: StatefulSet\n"
+ " patch: |-\n"
+ " - op: add\n"
+ " path: /spec/template/spec/containers/0/livenessProbe\n"
+ " value:\n"
+ " exec:\n"
+ " command:\n"
+ " - sh\n"
+ " - -c\n"
+ " - rpcinfo -t localhost nfs || exit 1\n"
+ " initialDelaySeconds: 30\n"
+ " periodSeconds: 10\n"
+ " timeoutSeconds: 5\n"
+ " failureThreshold: 3\n"
+ " - target:\n"
+ " kind: StatefulSet\n"
+ " patch: |-\n"
+ " - op: add\n"
+ " path: /spec/template/spec/containers/0/readinessProbe\n"
+ " value:\n"
+ " exec:\n"
+ " command:\n"
+ " - sh\n"
+ " - -c\n"
+ " - rpcinfo -t localhost nfs || exit 1\n"
+ " initialDelaySeconds: 10\n"
+ " periodSeconds: 5\n"
+ " timeoutSeconds: 3\n"
+ " failureThreshold: 2\n"
+ " - target:\n"
+ " kind: StatefulSet\n"
+ " patch: |-\n"
+ " - op: add\n"
+ " path: /spec/template/spec/volumes/-\n"
+ " value:\n"
+ " name: vfs-template\n"
+ " configMap:\n"
+ " name: ganesha-vfs-template\n"
+ " - op: add\n"
+ " path: /spec/template/spec/volumes/-\n"
+ " value:\n"
+ " name: vfs-conf-update\n"
+ " configMap:\n"
+ " name: vfs-conf-update\n"
+ " defaultMode: 0644\n"
+ " - op: add\n"
+ " path: /spec/template/spec/initContainers\n"
+ " value:\n"
+ " - name: vfs-conf-update\n"
+ " image: busybox:1.36.0\n"
+ " imagePullPolicy: IfNotPresent\n"
+ " securityContext:\n"
+ " runAsUser: 0\n"
+ " runAsGroup: 0\n"
+ " command:\n"
+ " - /bin/sh\n"
+ " - /etc/ganesha/scripts/vfs-conf-update.sh\n"
+ " volumeMounts:\n"
+ " - name: data\n"
+ " mountPath: /export\n"
+ " - name: vfs-template\n"
+ " mountPath: /etc/ganesha\n"
+ " readOnly: true\n"
+ " - name: vfs-conf-update\n"
+ " mountPath: /etc/ganesha/scripts\n"
+ " readOnly: true\n\n"
+ " test-nfs-ganesha:\n"
+ " enabled: '{{ .Values.env_type | eq \"ci\" }}'\n"
+ " info:\n"
+ " description: Perform testing for RWX enabled PVs created from NFS Ganesha\n"
+ " internal: true\n"
+ " test: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " nfs-ganesha: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"nfs-ganesha\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/test-nfs-ganesha\n"
+ " targetNamespace: nfs-ganesha\n"
+ " wait: true\n"
+ " force: true\n\n"
+ " ceph-csi-cephfs:\n"
+ " info:\n"
+ " description: Installs Ceph-CSI\n"
+ " maturity: beta\n"
+ " enabled: false\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " openshift-security-context-constraints: '{{ eq .Values.cluster.capi_providers.bootstrap_provider \"cabpoa\" }}'\n"
+ " helm_repo_url: https://ceph.github.io/csi-charts\n"
+ " helmrelease_spec:\n"
+ " targetNamespace: ceph-csi-cephfs\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: ceph-csi-cephfs\n"
+ " version: 3.14.0\n"
+ " values:\n"
+ " provisioner:\n"
+ " replicaCount: '{{ .Values.cluster.control_plane_replicas }}'\n"
+ " storageClass:\n"
+ " create: true\n"
+ " name: cephfs-csi\n"
+ " clusterID: '{{ .Values.ceph.cephfs_csi.clusterID }}'\n"
+ " fsName: '{{ .Values.ceph.cephfs_csi.fs_name }}'\n"
+ " annotations:\n"
+ " storageclass.kubernetes.io/is-default-class: '{{ .Values._internal.default_storage_class | eq \"cephfs-csi\" }}'\n"
+ " csiConfig:\n"
+ " - clusterID: '{{ .Values.ceph.cephfs_csi.clusterID }}'\n"
+ " monitors: '{{ .Values.ceph.cephfs_csi.monitors_ips | include \"preserve-type\" }}'\n"
+ " helm_secret_values:\n"
+ " secret:\n"
+ " create: true\n"
+ " userID: '{{ .Values.ceph.cephfs_csi.adminID }}'\n"
+ " userKey: '{{ .Values.ceph.cephfs_csi.adminKey }}'\n\n"
+ " longhorn-crd:\n"
+ " info:\n"
+ " description: installs Longhorn CRDs\n"
+ " maturity: stable\n"
+ " hidden: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"longhorn\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " helm_repo_url: https://charts.rancher.io/\n"
+ " helmrelease_spec:\n"
+ " releaseName: longhorn-crd\n"
+ " targetNamespace: longhorn-system\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: longhorn-crd\n"
+ " version: 106.2.1+up1.8.2\n"
+ " kustomization_spec:\n"
+ " prune: false\n\n"
+ " longhorn:\n"
+ " info:\n"
+ " description: installs Longhorn CSI\n"
+ " maturity: stable\n"
+ " enabled_conditions:\n"
+ " - '{{ eq .Values.cluster.capi_providers.infra_provider \"capm3\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " longhorn-crd: true\n"
+ " monitoring-crd: '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " # Ensure that all volumes are in a healthy state prior to upgrade longhorn\n"
+ " longhorn-volumes-healthy: '{{ .Values._internal.state.is_upgrade }}'\n"
+ " # Ensure that longhorn is upgraded after cluster. This should be inherited from base-deps,\n"
+ " # but we add this as a safeguard, as some unit could add an indirect dependency of cluster to longhorn.\n"
+ " # Following dependency will prevent that as it would create a loop in such cases.\n"
+ " cluster-machines-ready: '{{ .Values._internal.state.is_upgrade }}'\n"
+ " helm_repo_url: https://charts.rancher.io/\n"
+ " helmrelease_spec:\n"
+ " releaseName: longhorn\n"
+ " targetNamespace: longhorn-system\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: longhorn\n"
+ " version: 106.2.1+up1.8.2\n"
+ " values:\n"
+ " metrics:\n"
+ " serviceMonitor:\n"
+ " enabled: '{{ .Values._internal.monitoring.enabled | include \"preserve-type\" }}'\n"
+ " defaultSettings:\n"
+ " createDefaultDiskLabeledNodes: true\n"
+ " allowVolumeCreationWithDegradedAvailability: false\n"
+ " storageMinimalAvailablePercentage: 10\n"
+ " replicaReplenishmentWaitInterval: '{{ .Values.cluster.capi_providers.infra_provider | eq \"capm3\" | ternary 3600 600 | include \"preserve-type\" }}'\n"
+ " nodeDrainPolicy: block-for-eviction-if-contains-last-replica\n"
+ " concurrentAutomaticEngineUpgradePerNodeLimit: 3\n"
+ " storageReservedPercentageForDefaultDisk: 0 # Percentage of each default disk to reserve (not used for volume scheduling).\n"
+ " # Intended to prevent overfilling OS-shared disks.\n"
+ " # Set to 0 to make full disk space schedulable.\n"
+ " persistence:\n"
+ " defaultClass: '{{ .Values._internal.default_storage_class | eq \"longhorn\" | include \"preserve-type\" }}'\n\n"
+ " longhorn-volumes-healthy:\n"
+ " info:\n"
+ " description: wait for all longhorn volumes to be in a healthy state (attached/healthy or detached/unknown)\n"
+ " internal: true\n"
+ " annotations:\n"
+ " sylvactl/unitTimeout: 30m\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"longhorn\" | include \"unit-enabled\" }}'\n"
+ " - '{{ .Values._internal.state.is_upgrade }}'\n"
+ " depends_on:\n"
+ " cluster-machines-ready: '{{ .Values._internal.state.is_upgrade }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - kube-job\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " targetNamespace: longhorn-system\n"
+ " _patches:\n"
+ " - '{{ include \"kube-job-replace-script-patch\" (.Files.Get \"scripts/longhorn-volumes-healthy.sh\") }}'\n\n"
+ " longhorn-engine-image-cleanup:\n"
+ " info:\n"
+ " description: kyverno cleanup policy to delete old Longhorn engineimages that are left-over after upgrade\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"longhorn\" | include \"unit-enabled\" }}'\n"
+ " - '{{ tuple . \"kyverno\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " longhorn: true\n"
+ " kyverno: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kyverno-policies/longhorn-engine-image-cleanup\n\n"
+ " longhorn-update-stale-replica-timeout:\n"
+ " info:\n"
+ " description: Kyverno policy to set the staleReplicaTimeout value to 60 for all Longhorn volumes\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"longhorn\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " longhorn: '{{ not .Values._internal.state.is_upgrade }}'\n"
+ " kyverno: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kyverno-policies/update-stalereplicatimeout\n\n"
+ " longhorn-instance-manager-cleanup:\n"
+ " info:\n"
+ " description: cronjob to cleanup Longhorn instance-manager pods that are preventing node drain\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"longhorn\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - kube-cronjob\n"
+ " kustomization_spec:\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " NAMESPACE: longhorn-system\n"
+ " _patches:\n"
+ " - target:\n"
+ " kind: CronJob\n"
+ " patch: |\n"
+ " - op: replace\n"
+ " path: /spec/schedule\n"
+ " value: \"*/5 * * * *\" # Every 5 minutes\n"
+ " - target:\n"
+ " kind: ClusterRole\n"
+ " patch: |\n"
+ " - op: replace\n"
+ " path: /rules\n"
+ " value:\n"
+ " - apiGroups: [\"\"]\n"
+ " resources: [nodes]\n"
+ " verbs: [list, get, patch]\n"
+ " - '{{ include \"kube-cronjob-replace-script-patch\" (.Files.Get \"scripts/longhorn-instance-manager-cleanup.sh\") }}'\n\n"
+ " rancher-default-roles:\n"
+ " info:\n"
+ " description: Create Rancher role templates\n"
+ " details: |\n"
+ " This unit creates a set of additional role templates which are likely to be needed by many\n"
+ " clusters.\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"rancher\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " rancher: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/rancher-default-roles\n\n"
+ " os-images-info:\n"
+ " info:\n"
+ " description: Creates a list of os images\n"
+ " details: |\n"
+ " This unit creates a configmap containing information on operating system images for use with CAPO and CAPM3.\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ or (tuple . \"capm3\" | include \"unit-enabled\")\n"
+ " (.Values.cluster.capi_providers.infra_provider | eq \"capo\")\n"
+ " (.Values.cluster.capi_providers.infra_provider | eq \"capm3\")\n"
+ " }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - kube-job\n"
+ " kustomization_spec:\n"
+ " _components:\n"
+ " - '{{ tuple \"./components/certs\" (not .Values.oci_registry_insecure) | include \"set-only-if\" }}'\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " JOB_CHECKSUM: |-\n"
+ " {{- tuple . \"no_proxy_additional\" | include \"interpret\" -}}\n"
+ " {{\n"
+ " tuple . (list\n"
+ " .Values._internal.sylva_core_version\n"
+ " (include \"generate-os-images\" .)\n"
+ " .Values.proxies\n"
+ " .Values.no_proxy_additional\n"
+ " .Values.oci_registry_insecure\n"
+ " ) | include \"interpret-and-hash\"\n"
+ " }}\n"
+ " EXTRA_CA_CERTS: '{{ tuple (.Values.oci_registry_extra_ca_certs | default \"\" | b64enc) (not .Values.oci_registry_insecure) | include \"set-only-if\" }}'\n"
+ " _patches:\n"
+ " - '{{ include \"kube-job-replace-image-patch\" .Values._internal.oci_tools_image }}'\n"
+ " - '{{ include \"kube-job-add-env-var-patch\" (dict\n"
+ " \"https_proxy\" .Values.mgmt_cluster_state_values.proxies.https_proxy\n"
+ " \"no_proxy\" .Values.mgmt_cluster_state_values.proxies.no_proxy\n"
+ " \"oci_registry_insecure\" .Values.oci_registry_insecure\n"
+ " \"SKIP_IMAGE_VERIFICATION\" .Values.security.os_images.skip_signing_check\n"
+ " )}}'\n"
+ " - '{{ include \"kube-job-replace-script-patch\" (.Files.Get \"scripts/create-os-images-info.sh\") }}'\n"
+ " - '{{ include \"kube-job-add-files-to-configmap-patch\" (dict \"images.yaml\" (include \"generate-os-images\" .)) }}'\n"
+ " wait: false\n"
+ " healthChecks:\n"
+ " - apiVersion: batch/v1\n"
+ " kind: Job\n"
+ " name: os-images-info\n"
+ " namespace: '{{ .Release.Namespace }}'\n"
+ " - apiVersion: v1\n"
+ " kind: ConfigMap\n"
+ " name: os-images-info\n"
+ " namespace: '{{ .Release.Namespace }}'\n\n"
+ " os-image-server:\n"
+ " info:\n"
+ " description: >\n"
+ " Deploys a web server on management cluster\n"
+ " which serves OS images for baremetal clusters.\n"
+ " maturity: stable\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"metal3\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " ingress-nginx: '{{ tuple . \"ingress-nginx\" | include \"unit-enabled\" }}'\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': '{{ not .Values._internal.state.is_upgrade }}'\n"
+ " os-images-info: true\n"
+ " repo: os-image-server\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: |\n"
+ " {{- $osImageFqdn := (coalesce .Values.external_hostnames.os_image_server .Values.display_external_ip) -}}\n"
+ " OS images are served at URLs like: https://{{ $osImageFqdn }}/<filename>(.sha256sum)\n"
+ " {{- if not (eq $osImageFqdn .Values.display_external_ip)}}\n"
+ " ({{ .Values.external_hostnames.os_image_server }} must resolve to {{ .Values.display_external_ip }})\n"
+ " {{- end }}\n"
+ " sylvactl/unitTimeout: 30m\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: ./charts/os-image-server\n"
+ " targetNamespace: os-images\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " timeout: 168h # leave plenty of time to download OS images in initContainers\n"
+ " values:\n"
+ " outputConfigMap:\n"
+ " namespace: sylva-system\n"
+ " name: capm3-os-image-server-os-images\n"
+ " downloader:\n"
+ " proxy: '{{ get .Values.proxies \"https_proxy\" }}'\n"
+ " no_proxy: '{{ include \"sylva-units.no_proxy\" (tuple .) }}'\n"
+ " extra_ca_certs: '{{ .Values.oci_registry_extra_ca_certs | include \"set-if-defined\" }}'\n"
+ " ingress:\n"
+ " className: nginx\n"
+ " hosts:\n"
+ " - host: '{{ .Values.external_hostnames.os_image_server }}'\n"
+ " osImagePersistenceDefaults:\n"
+ " enabled: true\n"
+ " size: '{{ .Values.os_images_default_download_storage_space }}'\n"
+ " storageClass: '{{ .Values._internal.default_storage_class }}'\n"
+ " os_image_selectors: >-\n"
+ " {{- tuple . \"cluster\" | include \"interpret\" -}}\n"
+ " {{\n"
+ " concat (tuple .Values.cluster | include \"find-cluster-image-selectors\" | fromJsonArray)\n"
+ " (.Values.os_image_server_additional_selectors | values)\n"
+ " | include \"preserve-type\"\n"
+ " }}\n"
+ " valuesFrom:\n"
+ " - kind: ConfigMap\n"
+ " name: os-images-info # this configmap is a byproduct of the os-images-info unit\n\n"
+ " capo-contrail-bgpaas:\n"
+ " info:\n"
+ " description: installs CAPO Contrail BGPaaS controller\n"
+ " maturity: stable\n"
+ " enabled: false\n"
+ " repo: capo-contrail-bgpaas\n"
+ " helm_chart_artifact_name: capo-contrail-bgpaas\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " heat-operator: '{{ tuple . \"heat-operator\" | include \"unit-enabled\" }}'\n"
+ " capo: '{{ tuple . \"capo\" | include \"unit-enabled\" }}'\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: .\n"
+ " targetNamespace: capo-contrail-bgpaas-system\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " values:\n"
+ " conf:\n"
+ " env:\n"
+ " DEFAULT_PORT: '0'\n"
+ " DEFAULT_ASN: '64512'\n\n"
+ " opennebula-cpi:\n"
+ " info:\n"
+ " description: configures OpenNebula Cloud controller manager\n"
+ " internal: true\n"
+ " unit_templates: []\n"
+ " depends_on:\n"
+ " cluster: '{{ tuple . \"cluster\" | include \"unit-enabled\" }}'\n"
+ " enabled_conditions:\n"
+ " - '{{ eq .Values.cluster.capi_providers.infra_provider \"capone\" }}'\n"
+ " helm_repo_url: https://opennebula.github.io/cloud-provider-opennebula/charts\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: opennebula-cpi\n"
+ " version: \"0.1.2\"\n"
+ " storageNamespace: kube-system\n"
+ " targetNamespace: kube-system\n"
+ " values:\n"
+ " CCM_CTL: cloud-node,cloud-node-lifecycle\n"
+ " CLUSTER_NAME: \"{{ .Values.cluster.name }}\"\n"
+ " PUBLIC_NETWORK_NAME: \"{{ .Values.cluster.capone.public_network }}\"\n"
+ " PRIVATE_NETWORK_NAME: \"\"\n"
+ " ROUTER_TEMPLATE_NAME: \"\"\n"
+ " nodeSelector:\n"
+ " node-role.kubernetes.io/control-plane: '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" | ternary \"true\" \"\" }}'\n"
+ " helm_secret_values:\n"
+ " ONE_XMLRPC: \"{{ .Values.cluster.capone.ONE_XMLRPC }}\"\n"
+ " ONE_AUTH: \"{{ .Values.cluster.capone.ONE_AUTH }}\"\n\n"
+ " vsphere-cpi:\n"
+ " info:\n"
+ " description: configures Vsphere Cloud controller manager\n"
+ " internal: true\n"
+ " unit_templates: [] # the vsphere-cpi handles the rollout of control plane nodes so must run as soon as the cluster has been created\n"
+ " depends_on:\n"
+ " cluster: '{{ tuple . \"cluster\" | include \"unit-enabled\" }}'\n"
+ " enabled_conditions:\n"
+ " - '{{ eq .Values.cluster.capi_providers.infra_provider \"capv\" }}'\n"
+ " helm_repo_url: https://kubernetes.github.io/cloud-provider-vsphere\n"
+ " helm_chart_versions:\n"
+ " v1.30.0: '{{ include \"k8s-version-match\" (tuple \">=1.30.0,<1.31.0\" .Values._internal.k8s_version) }}'\n"
+ " v1.31.0: '{{ include \"k8s-version-match\" (tuple \">=1.31.0,<1.32.0\" .Values._internal.k8s_version) }}'\n"
+ " 1.32.1: '{{ include \"k8s-version-match\" (tuple \">=1.32.0\" .Values._internal.k8s_version) }}'\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: vsphere-cpi\n"
+ " version: \"\" # defined by helm_chart_versions\n"
+ " releaseName: vsphere-cpi\n"
+ " storageNamespace: kube-system\n"
+ " targetNamespace: kube-system\n"
+ " values:\n"
+ " config:\n"
+ " enabled: true\n"
+ " vcenter: '{{ .Values.cluster.capv.server }}'\n"
+ " datacenter: '{{ .Values.cluster.capv.dataCenter }}'\n"
+ " thumbprint: '{{ .Values.cluster.capv.tlsThumbprint }}'\n"
+ " daemonset:\n"
+ " image: registry.k8s.io/cloud-pv-vsphere/cloud-provider-vsphere\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - patch: |-\n"
+ " kind: ConfigMap\n"
+ " apiVersion: v1\n"
+ " metadata:\n"
+ " name: vsphere-cloud-config\n"
+ " namespace: kube-system\n"
+ " data:\n"
+ " vsphere.conf: | {{ index (index .Values.vsphere \"vsphere-cpi\") \"vsphere_conf\" | toYaml | nindent 4 }}\n\n"
+ " helm_secret_values:\n"
+ " config:\n"
+ " username: '{{ .Values.cluster.capv.username }}'\n"
+ " password: '{{ .Values.cluster.capv.password }}'\n\n"
+ " vsphere-csi-driver:\n"
+ " info:\n"
+ " description: installs Vsphere CSI\n"
+ " maturity: stable\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ eq .Values.cluster.capi_providers.infra_provider \"capv\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: kustomize-units/vsphere-csi-driver\n"
+ " targetNamespace: vmware-system-csi\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " SERVER: '{{ .Values.cluster.capv.server }}'\n"
+ " DATACENTER: '{{ .Values.cluster.capv.dataCenter }}'\n"
+ " CLUSTER_ID: '{{ printf \"%s-%s\" .Values.cluster.name (randAlphaNum 10) | trunc 64 }}'\n"
+ " STORAGE_POLICY_NAME: '{{ .Values.cluster.capv.storagePolicyName | default \"\" }}'\n"
+ " CONTROLLER_REPLICAS: '{{ .Values._internal.ha_cluster.is_ha | ternary 3 1 }}'\n"
+ " kustomization_substitute_secrets:\n"
+ " USERNAME: '{{ .Values.cluster.capv.username }}'\n"
+ " PASSWORD: '{{ .Values.cluster.capv.password }}'\n\n"
+ " sandbox-privileged-namespace:\n"
+ " info:\n"
+ " description: >\n"
+ " creates the sandbox namespace used\n"
+ " to perform privileged operations like debugging a node.\n"
+ " It cannot be enabled when env_type=prod\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ or (.Values.env_type | eq \"dev\") (.Values.env_type | eq \"ci\") }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/sandbox-privileged-namespace\n"
+ " wait: true\n"
+ " prune: true\n\n"
+ " # Gitea-unit\n"
+ " gitea-secrets:\n"
+ " info:\n"
+ " description: >\n"
+ " create random secret that will be used by gitea application.\n"
+ " secrets are sync with vault.\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"gitea\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " vault-config-operator: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/gitea/secrets\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " K8S_AUTH_PATH: '{{ .Values.security.vault.paths.k8s }}'\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " VAULT_API: '{{ .Values.security.vault.external_vault_url | include \"set-if-defined\" }}'\n"
+ " wait: true\n\n"
+ " gitea-eso:\n"
+ " info:\n"
+ " description: >\n"
+ " write secrets in gitea namespace in gitea expected format\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"gitea\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " eso-secret-stores: true\n"
+ " gitea-keycloak-resources: true\n"
+ " gitea-secrets: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " KEY: '{{ .Values.external_certificates.gitea.key | default \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/gitea/eso\n"
+ " wait: false\n"
+ " healthChecks:\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: gitea-keycloak-oidc-auth\n"
+ " namespace: gitea\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: gitea-admin\n"
+ " namespace: gitea\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: gitea-postgres-secrets\n"
+ " namespace: gitea\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: gitea-redis\n"
+ " namespace: gitea\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: sylva-ca.crt\n"
+ " namespace: gitea\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " SERVICE: gitea\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.gitea }}'\n"
+ " CERT: '{{ .Values.external_certificates.gitea.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " _components:\n"
+ " - '{{ ternary \"../../tls-components/tls-secret\" \"../../tls-components/tls-certificate\" (hasKey .Values.external_certificates.gitea \"cert\") }}'\n"
+ " - \"../../tls-components/sylva-ca\"\n\n"
+ " gitea-keycloak-resources:\n"
+ " info:\n"
+ " description: >\n"
+ " deploys Gitea OIDC client in Sylva's Keycloak realm\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"gitea\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " keycloak: true\n"
+ " keycloak-legacy-operator: true\n"
+ " keycloak-resources: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/gitea/keycloak-resources\n"
+ " targetNamespace: keycloak\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " GITEA_DNS: '{{ .Values.external_hostnames.gitea }}'\n"
+ " healthChecks:\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: keycloak-client-secret-gitea-client # this secret is a byproduct of the gitea-client KeycloakClient resource\n"
+ " namespace: keycloak\n\n"
+ " gitea-redis:\n"
+ " info:\n"
+ " description: installs Redis cluster for Gitea\n"
+ " maturity: stable\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"gitea\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " gitea-eso: true\n"
+ " gitea-keycloak-resources: true\n"
+ " gitea-secrets: true\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': true\n"
+ " repo: bitnami-redis-cluster\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: bitnami/redis-cluster\n"
+ " targetNamespace: gitea\n"
+ " releaseName: gitea-redis\n"
+ " values:\n"
+ " image:\n"
+ " repository: bitnamilegacy/redis-cluster\n"
+ " metrics:\n"
+ " image:\n"
+ " repository: bitnamilegacy/redis-exporter\n"
+ " sysctlImage:\n"
+ " repository: bitnamilegacy/os-shell\n"
+ " volumePermissions:\n"
+ " image:\n"
+ " repository: bitnamilegacy/os-shell\n"
+ " usePassword: true\n"
+ " existingSecret: gitea-redis\n"
+ " existingSecretPasswordKey: password\n"
+ " global:\n"
+ " storageClass: \"{{ .Values._internal.default_storage_class }}\"\n"
+ " persistence:\n"
+ " size: 8Gi\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - '{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include \"preserve-type\" }}'\n\n"
+ " gitea-postgresql-ha:\n"
+ " info:\n"
+ " description: installs PostgreSQL HA cluster for Gitea\n"
+ " maturity: stable\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"gitea\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " gitea-eso: true\n"
+ " gitea-keycloak-resources: true\n"
+ " gitea-secrets: true\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': true\n"
+ " repo: bitnami-postgresql-ha\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: bitnami/postgresql-ha\n"
+ " targetNamespace: gitea\n"
+ " releaseName: gitea-postgres\n"
+ " values:\n"
+ " global:\n"
+ " storageClass: \"{{ .Values._internal.default_storage_class }}\"\n"
+ " postgresql:\n"
+ " image:\n"
+ " repository: bitnamilegacy/postgresql-repmgr\n"
+ " username: gitea\n"
+ " database: gitea\n"
+ " existingSecret: gitea-postgres-secrets\n"
+ " pgpool:\n"
+ " image:\n"
+ " repository: bitnamilegacy/pgpool\n"
+ " existingSecret: gitea-postgres-secrets\n"
+ " persistence:\n"
+ " size: 8Gi\n"
+ " metrics:\n"
+ " image:\n"
+ " repository: bitnamilegacy/postgres-exporter\n"
+ " volumePermissions:\n"
+ " image:\n"
+ " repository: bitnamilegacy/os-shell\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - '{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include \"preserve-type\" }}'\n\n"
+ " gitea:\n"
+ " info:\n"
+ " description: installs Gitea\n"
+ " maturity: stable\n"
+ " enabled: false\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " gitea-eso: true\n"
+ " gitea-keycloak-resources: true\n"
+ " gitea-secrets: true\n"
+ " gitea-redis: true\n"
+ " gitea-postgresql-ha: true\n"
+ " namespace-defs: true\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': true\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"Gitea can be reached at https://{{ .Values.external_hostnames.gitea }} ({{ .Values._internal.display_external_ip_msg }})\"\n"
+ " helm_repo_url: https://dl.gitea.com/charts/\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: gitea\n"
+ " version: 11.0.1\n"
+ " targetNamespace: gitea\n"
+ " releaseName: gitea\n"
+ " values:\n"
+ " containerSecurityContext:\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " privileged: false\n"
+ " readOnlyRootFilesystem: true\n"
+ " runAsGroup: 1000\n"
+ " runAsNonRoot: true\n"
+ " runAsUser: 1000\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " redis-cluster:\n"
+ " enabled: false\n"
+ " postgresql:\n"
+ " enabled: false\n"
+ " postgresql-ha:\n"
+ " enabled: false\n"
+ " persistence:\n"
+ " enabled: true\n"
+ " size: 10Gi\n"
+ " storageClass: \"{{ .Values._internal.default_storage_class }}\"\n"
+ " accessModes:\n"
+ " - \"{{ .Values._internal.default_storage_class_access_mode_rwx }}\"\n"
+ " replicaCount: >-\n"
+ " {{- if eq .Values._internal.default_storage_class_RWX_support \"true\" -}}\n"
+ " {{ 3 | include \"preserve-type\" }}\n"
+ " {{- else -}}\n"
+ " {{ 1 | include \"preserve-type\" }}\n"
+ " {{- end -}}\n"
+ " strategy:\n"
+ " type: '{{ eq (tuple . .Values._internal.default_storage_class_RWX_support | include \"interpret-as-string\") \"true\" | ternary \"RollingUpdate\" \"Recreate\" }}'\n"
+ " actions:\n"
+ " provisioning:\n"
+ " publish:\n"
+ " repository: alpine/kubectl\n"
+ " tag: 1.34.1\n"
+ " gitea:\n"
+ " admin:\n"
+ " existingSecret: gitea-admin\n"
+ " metrics:\n"
+ " enabled: true\n"
+ " serviceMonitor:\n"
+ " enabled: '{{ .Values._internal.monitoring.enabled | include \"preserve-type\" }}'\n"
+ " oauth:\n"
+ " - name: \"keycloak-sylva\"\n"
+ " provider: \"openidConnect\"\n"
+ " existingSecret: gitea-keycloak-oidc-auth\n"
+ " autoDiscoverUrl: 'https://{{ .Values.external_hostnames.keycloak }}/realms/sylva/.well-known/openid-configuration'\n\n"
+ " config:\n"
+ " cron:\n"
+ " ENABLED: false\n"
+ " cron.GIT_GC_REPOS:\n"
+ " ENABLED: false\n"
+ " server:\n"
+ " ENABLE_PPROF: true\n"
+ " database:\n"
+ " DB_TYPE: postgres\n"
+ " HOST: gitea-postgres-postgresql-ha-pgpool.gitea.svc.cluster.local:5432\n"
+ " NAME: gitea\n"
+ " USER: gitea\n"
+ " # define by env variable: PASSWD\n"
+ " SCHEMA: public\n"
+ " session:\n"
+ " PROVIDER: redis\n"
+ " # define by env variable: PROVIDER_CONFIG\n"
+ " cache:\n"
+ " ADAPTER: redis\n"
+ " # define by env variable: HOST\n"
+ " queue:\n"
+ " TYPE: redis\n"
+ " # define by env variable: CONN_STR\n"
+ " indexer:\n"
+ " REPO_INDEXER_ENABLED: false\n"
+ " ISSUE_INDEXER_ENABLED: false\n\n"
+ " additionalConfigFromEnvs:\n"
+ " - name: GITEA__DATABASE__PASSWD # define DB password\n"
+ " valueFrom:\n"
+ " secretKeyRef:\n"
+ " key: password\n"
+ " name: gitea-postgres-secrets\n"
+ " - name: GITEA__QUEUE__CONN_STR # redis connection string for queue\n"
+ " valueFrom:\n"
+ " secretKeyRef:\n"
+ " key: connection_string\n"
+ " name: gitea-redis\n"
+ " - name: GITEA__SESSION__PROVIDER_CONFIG # redis connection string for session\n"
+ " valueFrom:\n"
+ " secretKeyRef:\n"
+ " key: connection_string\n"
+ " name: gitea-redis\n"
+ " - name: GITEA__CACHE__HOST # redis connection string for queue\n"
+ " valueFrom:\n"
+ " secretKeyRef:\n"
+ " key: connection_string\n"
+ " name: gitea-redis\n"
+ " ingress:\n"
+ " enabled: true\n"
+ " className: nginx\n"
+ " annotations:\n"
+ " nginx.ingress.kubernetes.io/proxy-body-size: 8m\n"
+ " tls:\n"
+ " - hosts:\n"
+ " - '{{ .Values.external_hostnames.gitea }}'\n"
+ " secretName: gitea-tls\n"
+ " hosts:\n"
+ " - host: '{{ .Values.external_hostnames.gitea }}'\n"
+ " paths:\n"
+ " - path: /\n"
+ " pathType: Prefix\n"
+ " extraVolumes:\n"
+ " - secret:\n"
+ " defaultMode: 420\n"
+ " secretName: sylva-ca.crt\n"
+ " name: sylva-ca\n"
+ " extraVolumeMounts:\n"
+ " - mountPath: /etc/ssl/certs/sylva-ca.crt\n"
+ " name: sylva-ca\n"
+ " readOnly: true\n"
+ " subPath: ca.crt\n\n"
+ " # Kunai-unit\n"
+ " kunai-secrets:\n"
+ " info:\n"
+ " description: >\n"
+ " create random secret that will be used by kunai application.\n"
+ " secrets are sync with vault.\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"kunai\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " vault-config-operator: true\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kunai/secrets\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " K8S_AUTH_PATH: '{{ .Values.security.vault.paths.k8s }}'\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " VAULT_API: '{{ .Values.security.vault.external_vault_url | include \"set-if-defined\" }}'\n\n"
+ " kunai-eso:\n"
+ " info:\n"
+ " description: >\n"
+ " write secrets in kunai namespace in kunai expected format\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"kunai\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " eso-secret-stores: true\n"
+ " keycloak-resources: true\n"
+ " kunai-secrets: true\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " KEY: '{{ .Values.external_certificates.kunai.key | default \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kunai/eso\n"
+ " wait: false\n"
+ " healthChecks:\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: kunai-next-oidc\n"
+ " namespace: kunai\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: kunai-next-auth\n"
+ " namespace: kunai\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: sylva-ca.crt\n"
+ " namespace: kunai\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " SERVICE: kunai\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.kunai }}'\n"
+ " CERT: '{{ .Values.external_certificates.kunai.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " KEYCLOAK_DNS: '{{ .Values.external_hostnames.keycloak }}'\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " _components:\n"
+ " - '{{ ternary \"../../tls-components/tls-secret\" \"../../tls-components/tls-certificate\" (hasKey .Values.external_certificates.kunai \"cert\") }}'\n"
+ " - \"../../tls-components/sylva-ca\"\n\n"
+ " kunai-postgres-cnpg:\n"
+ " info:\n"
+ " description: Deploy Postgres cluster for Kunai using Cloud Native PostgreSQL (CNPG)\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"kunai\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " depends_on:\n"
+ " cnpg-operator: true\n"
+ " namespace-defs: true\n"
+ " kunai-eso: true\n"
+ " kunai-secrets: true\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': true\n"
+ " kustomization_spec:\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " KUNAI_POSTGRES_REPLICAS: '{{ .Values._internal.ha_cluster.is_ha | ternary 3 1 }}'\n"
+ " storageClass: '{{ .Values._internal.default_storage_class }}'\n"
+ " podAntiAffinityType: '{{ .Values._internal.ha_cluster.is_ha | ternary \"required\" \"preferred\" }}'\n"
+ " path: ./kustomize-units/kunai-postgres-cnpg\n"
+ " wait: true\n\n"
+ " kunai:\n"
+ " info:\n"
+ " description: installs Kunai\n"
+ " details: |-\n"
+ " The integration of [Kunai](https://gitlab.com/sylva-projects/sylva-elements/kunai) at this stage should be considered experimental.\n"
+ " Work is in progress to align its integration with workload-cluster-operator and workload-teams-defs.\n"
+ " See https://gitlab.com/groups/sylva-projects/-/epics/58.\n"
+ " maturity: experimental\n"
+ " enabled: false\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " cert-manager: true\n"
+ " keycloak-resources: true\n"
+ " kunai-eso: true\n"
+ " kunai-secrets: true\n"
+ " kunai-postgres-cnpg: true\n"
+ " namespace-defs: true\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"Kunai UI can be reached at https://{{ .Values.external_hostnames.kunai }} ({{ .Values.external_hostnames.kunai }} must resolve to {{ .Values.display_external_ip }})\"\n"
+ " helm_repo_url: https://gitlab.com/api/v4/projects/sylva-projects%2Fsylva-elements%2Fkunai/packages/helm/stable\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: kunai\n"
+ " version: 2.1.2\n"
+ " targetNamespace: kunai\n"
+ " values:\n"
+ " postgresql:\n"
+ " enabled: false\n"
+ " externalDatabase:\n"
+ " existingSecret: cnpg-kunai-app\n"
+ " persistence:\n"
+ " enabled: false\n"
+ " kunai:\n"
+ " nextAuth:\n"
+ " enabled: true\n"
+ " existingsecretname: kunai-next-auth\n"
+ " nextOidc:\n"
+ " existingsecretname: kunai-next-oidc\n"
+ " extraCaCert:\n"
+ " enabled: true\n"
+ " existingsecretname: sylva-ca.crt\n"
+ " existingsecretkey: ca.crt\n"
+ " proxies:\n"
+ " http_proxy: '{{ .Values.proxies.http_proxy }}'\n"
+ " https_proxy: '{{ .Values.proxies.https_proxy }}'\n"
+ " no_proxy: '{{ include \"sylva-units.no_proxy\" (tuple .) }}'\n"
+ " ingress:\n"
+ " enabled: true\n"
+ " ingressClassName: nginx\n"
+ " annotations:\n"
+ " nginx.ingress.kubernetes.io/proxy-body-size: 8m\n"
+ " nginx.ingress.kubernetes.io/large-client-header-buffers: \"4 16k\"\n"
+ " nginx.ingress.kubernetes.io/proxy-buffer-size: \"256k\"\n"
+ " tls: false\n"
+ " extraTls:\n"
+ " - hosts:\n"
+ " - '{{ .Values.external_hostnames.kunai }}'\n"
+ " secretName: kunai-tls\n"
+ " hostname: '{{ .Values.external_hostnames.kunai }}'\n\n"
+ " minio-operator-init:\n"
+ " info:\n"
+ " description: sets up MinIO certificate for minio-operator\n"
+ " details: it generate certificate\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"minio-operator\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " KEY: '{{ .Values.external_certificates.minio_operator.key | default \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/minio-operator-init\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " SERVICE: minio-operator-console\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.minio_operator_console }}'\n"
+ " CERTIFICATE_NAMESPACE: minio-operator\n"
+ " CERT: '{{ .Values.external_certificates.minio_operator.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " CACERT_SECRET_NAME: operator-ca-tls\n"
+ " _components:\n"
+ " - '{{ ternary \"../tls-components/tls-secret\" \"../tls-components/tls-certificate\" (hasKey .Values.external_certificates.minio_operator \"cert\") }}'\n"
+ " - \"../tls-components/sylva-ca\"\n"
+ " - \"../kyverno-policies/minio-policies/minio-operator-policies\"\n\n"
+ " minio-operator:\n"
+ " info:\n"
+ " description: install MinIO operator\n"
+ " details: MinIO operator is used to manage multiple S3 tenants\n"
+ " maturity: beta\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ or (tuple . \"minio-monitoring\" | include \"unit-enabled\") (tuple . \"minio-logging\" | include \"unit-enabled\") }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " minio-operator-init: true\n"
+ " ingress-nginx: '{{ tuple . \"ingress-nginx\" | include \"unit-enabled\" }}'\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"minio operator console can be reached at https://{{ .Values.external_hostnames.minio_operator_console }} ({{ .Values._internal.display_external_ip_msg }})\"\n"
+ " repo: minio-operator\n"
+ " helmrelease_spec:\n"
+ " targetNamespace: minio-operator\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: helm/operator\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " values:\n"
+ " operator:\n"
+ " env:\n"
+ " - name: OPERATOR_STS_AUTO_TLS_ENABLED\n"
+ " value: \"off\"\n"
+ " containerSecurityContext:\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " securityContext:\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " resources:\n"
+ " requests:\n"
+ " cpu: 100m\n"
+ " memory: 128Mi\n"
+ " ephemeral-storage: 500Mi\n"
+ " limits:\n"
+ " cpu: 200m\n"
+ " memory: 256Mi\n"
+ " replicaCount: 1\n"
+ " console:\n"
+ " enabled: true\n"
+ " containerSecurityContext:\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " securityContext:\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " runAsNonRoot: true\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " ingress:\n"
+ " enabled: true\n"
+ " ingressClassName: nginx\n"
+ " host: '{{ .Values.external_hostnames.minio_operator_console }}'\n"
+ " path: /\n"
+ " pathType: Prefix\n"
+ " tls:\n"
+ " - hosts:\n"
+ " - '{{ .Values.external_hostnames.minio_operator_console }}'\n"
+ " secretName: minio-operator-console-tls\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - target:\n"
+ " kind: ClusterRole\n"
+ " name: minio-operator-role\n"
+ " patch: |\n"
+ " - op: add\n"
+ " path: /rules/-\n"
+ " value:\n"
+ " apiGroups:\n"
+ " - job.min.io\n"
+ " resources:\n"
+ " - '*'\n"
+ " verbs:\n"
+ " - '*'\n"
+ " - target:\n"
+ " kind: Deployment\n"
+ " name: minio-operator\n"
+ " patch: |\n"
+ " kind: _unused_\n"
+ " metadata:\n"
+ " name: _unused_\n"
+ " spec:\n"
+ " strategy:\n"
+ " rollingUpdate:\n"
+ " maxSurge: 0\n"
+ " template:\n"
+ " spec:\n"
+ " containers:\n"
+ " - name: operator\n"
+ " livenessProbe:\n"
+ " initialDelaySeconds: 60\n"
+ " exec:\n"
+ " command:\n"
+ " - bash\n"
+ " - -c\n"
+ " - \"exec 3<>/dev/tcp/localhost/4221\"\n"
+ " kustomization_spec:\n"
+ " healthChecks:\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " name: sts-tls\n"
+ " namespace: minio-operator\n\n"
+ " minio-logging-init:\n"
+ " info:\n"
+ " description: sets up secrets and certificates for minio-logging\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"minio-logging\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " vault: true\n"
+ " vault-config-operator: true\n"
+ " eso-secret-stores: true\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " KEY: '{{ .Values.external_certificates.minio_logging.key | default \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/minio-tenant-init\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " TENANT_NS: minio-logging\n"
+ " TENANT_NAME: logging\n"
+ " CLUSTER_DOMAIN: '{{ .Values.cluster_domain }}'\n"
+ " SERVICE: minio-logging\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.minio_logging }}'\n"
+ " CERTIFICATE_NAMESPACE: minio-logging\n"
+ " CERT: '{{ .Values.external_certificates.minio_logging.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " K8S_AUTH_PATH: '{{ .Values.security.vault.paths.k8s }}'\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " VAULT_API: '{{ .Values.security.vault.external_vault_url | include \"set-if-defined\" }}'\n"
+ " _components:\n"
+ " - '{{ ternary \"../tls-components/tls-secret\" \"../tls-components/tls-certificate\" (hasKey .Values.external_certificates.minio_logging \"cert\") }}'\n"
+ " - \"../tls-components/sylva-ca\"\n"
+ " - \"../kyverno-policies/minio-policies/minio-tenant-policies\"\n\n\n"
+ " minio-logging:\n"
+ " info:\n"
+ " description: creates a MinIO tenant for the logging stack, used as S3 storage by Loki\n"
+ " maturity: beta\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"loki\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " minio-operator: true\n"
+ " minio-logging-init: true\n"
+ " ingress-nginx: '{{ tuple . \"ingress-nginx\" | include \"unit-enabled\" }}'\n"
+ " two-replicas-storageclass: '{{ tuple . \"two-replicas-storageclass\" | include \"unit-enabled\" }}'\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': '{{ not (tuple . \"two-replicas-storageclass\" | include \"unit-enabled\") }}'\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"MinIO logging tenant console can be reached at https://{{ .Values.external_hostnames.minio_logging_console }} ({{ .Values._internal.display_external_ip_msg }})\"\n"
+ " repo: minio-operator\n"
+ " helmrelease_spec:\n"
+ " targetNamespace: minio-logging\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: helm/tenant\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " values:\n"
+ " tenant:\n"
+ " name: logging\n"
+ " serviceMetadata:\n"
+ " minioServiceLabels:\n"
+ " v1.min.io/tenant: logging\n"
+ " configSecret:\n"
+ " existingSecret: true\n"
+ " name: minio-logging-root\n"
+ " accessKey: \"\"\n"
+ " secretKey: \"\"\n"
+ " # Use certificate generated by cert-manager in minio-logging-init instead of relyin on minio operator\n"
+ " certificate:\n"
+ " requestAutoCert: false\n"
+ " externalCertSecret:\n"
+ " - name: minio-logging-internal-tls\n"
+ " type: kubernetes.io/tls\n"
+ " # The Kubernetes secret name that contains MinIO environment variable configurations:\n"
+ " configuration:\n"
+ " name: minio-logging-root\n"
+ " # Array of Kubernetes secrets from which the Operator generates MinIO users during tenant provisioning:\n"
+ " users:\n"
+ " - name: minio-logging-user\n"
+ " pools:\n"
+ " - servers: '{{ min .Values._internal.node_count 4 }}'\n"
+ " name: pool-0\n"
+ " volumesPerserver: 4\n"
+ " size: 3Gi\n"
+ " storageClassName: '{{ tuple . (tuple . \"two-replicas-storageclass\" | include \"unit-enabled\") \"two-replicas-storageclass\" .Values._internal.default_storage_class | include \"interpret-ternary\" }}'\n"
+ " containerSecurityContext:\n"
+ " runAsUser: 1000\n"
+ " runAsGroup: 1000\n"
+ " runAsNonRoot: true\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " securityContext:\n"
+ " runAsUser: 1000\n"
+ " runAsGroup: 1000\n"
+ " fsGroup: 1000\n"
+ " fsGroupChangePolicy: \"OnRootMismatch\"\n"
+ " runAsNonRoot: true\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " resources:\n"
+ " limits:\n"
+ " cpu: 500m\n"
+ " memory: 1024Mi\n"
+ " requests:\n"
+ " cpu: 100m\n"
+ " memory: 512Mi\n"
+ " affinity:\n"
+ " podAntiAffinity:\n"
+ " requiredDuringSchedulingIgnoredDuringExecution:\n"
+ " - labelSelector:\n"
+ " matchExpressions:\n"
+ " - key: v1.min.io/pool\n"
+ " operator: In\n"
+ " values:\n"
+ " - pool-0\n"
+ " - key: v1.min.io/tenant\n"
+ " operator: In\n"
+ " values:\n"
+ " - logging\n"
+ " topologyKey: \"kubernetes.io/hostname\"\n"
+ " metrics:\n"
+ " enabled: true\n"
+ " env:\n"
+ " - name: MINIO_PROMETHEUS_AUTH_TYPE\n"
+ " value: public\n"
+ " features:\n"
+ " bucketDNS: false\n"
+ " enableSFTP: false\n"
+ " buckets:\n"
+ " - name: \"loki-chunks\"\n"
+ " region: \"logging-cluster\"\n"
+ " objectLock: false\n"
+ " - name: \"loki-ruler\"\n"
+ " region: \"monitoring-cluster\"\n"
+ " objectLock: false\n"
+ " - name: \"loki-admin\"\n"
+ " region: \"monitoring-cluster\"\n"
+ " objectLock: false\n"
+ " prometheusOperator: false # Prometheus Operator's Service Monitor for MinIO Tenant Pods\n"
+ " logging:\n"
+ " anonymous: true\n"
+ " json: true\n"
+ " quiet: true\n"
+ " ingress:\n"
+ " api:\n"
+ " enabled: true\n"
+ " ingressClassName: nginx\n"
+ " annotations:\n"
+ " nginx.ingress.kubernetes.io/backend-protocol: \"HTTPS\"\n"
+ " host: '{{ .Values.external_hostnames.minio_logging }}'\n"
+ " path: /\n"
+ " pathType: Prefix\n"
+ " tls:\n"
+ " - hosts:\n"
+ " - '{{ .Values.external_hostnames.minio_logging }}'\n"
+ " secretName: minio-logging-tls\n"
+ " console:\n"
+ " enabled: true\n"
+ " ingressClassName: nginx\n"
+ " annotations:\n"
+ " nginx.ingress.kubernetes.io/backend-protocol: \"HTTPS\"\n"
+ " host: '{{ .Values.external_hostnames.minio_logging_console }}'\n"
+ " path: /\n"
+ " pathType: Prefix\n"
+ " tls:\n"
+ " - hosts:\n"
+ " - '{{ .Values.external_hostnames.minio_logging_console }}'\n"
+ " secretName: minio-logging-tls\n"
+ " kustomization_spec:\n"
+ " healthChecks:\n"
+ " - apiVersion: minio.min.io/v2\n"
+ " kind: Tenant\n"
+ " name: logging\n"
+ " namespace: minio-logging\n"
+ " healthCheckExprs: >-\n"
+ " {{\n"
+ " tuple (list (dict\n"
+ " \"apiVersion\" \"minio.min.io/v2\"\n"
+ " \"kind\" \"Tenant\"\n"
+ " \"current\" \"status.healthStatus == \\\"green\\\"\"\n"
+ " \"failed\" \"status.healthStatus != \\\"green\\\"\"\n"
+ " )\n"
+ " )\n"
+ " (lookup\n"
+ " \"apiextensions.k8s.io/v1\"\n"
+ " \"CustomResourceDefinition\"\n"
+ " \"\"\n"
+ " \"kustomizations.kustomize.toolkit.fluxcd.io\"\n"
+ " | toYaml\n"
+ " | contains \"HealthCheckExprs\"\n"
+ " )\n"
+ " | include \"set-only-if\"\n"
+ " }}\n\n"
+ " velero:\n"
+ " info:\n"
+ " description: Deploys Velero and its dependencies\n"
+ " maturity: beta\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " enabled: false\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " helm_repo_url: https://vmware-tanzu.github.io/helm-charts\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: velero\n"
+ " version: 11.0.0\n"
+ " targetNamespace: velero\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " values:\n"
+ " installCRDs: true\n"
+ " snapshotsEnabled: false\n"
+ " backupsEnabled: true\n"
+ " deployNodeAgent: false\n"
+ " configuration:\n"
+ " backupStorageLocation: []\n"
+ " volumeSnapshotLocation: []\n"
+ " resources:\n"
+ " requests:\n"
+ " cpu: 500m\n"
+ " memory: 128Mi\n"
+ " limits:\n"
+ " cpu: 1000m\n"
+ " memory: 512Mi\n"
+ " kubectl:\n"
+ " image:\n"
+ " repository: alpine/kubectl\n"
+ " tag: 1.34.1\n\n"
+ " minio-monitoring-init:\n"
+ " info:\n"
+ " description: sets up secrets and certificates for minio-monitoring\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"minio-monitoring\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " vault: true\n"
+ " vault-config-operator: true\n"
+ " eso-secret-stores: true\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " KEY: '{{ .Values.external_certificates.minio_monitoring.key | default \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/minio-tenant-init\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " TENANT_NS: minio-monitoring\n"
+ " TENANT_NAME: monitoring\n"
+ " CLUSTER_DOMAIN: '{{ .Values.cluster_domain }}'\n"
+ " SERVICE: minio-monitoring\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.minio_monitoring }}'\n"
+ " CERTIFICATE_NAMESPACE: minio-monitoring\n"
+ " CERT: '{{ .Values.external_certificates.minio_monitoring.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " K8S_AUTH_PATH: '{{ .Values.security.vault.paths.k8s }}'\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " VAULT_API: '{{ .Values.security.vault.external_vault_url | include \"set-if-defined\" }}'\n"
+ " _components:\n"
+ " - '{{ ternary \"../tls-components/tls-secret\" \"../tls-components/tls-certificate\" (hasKey .Values.external_certificates.minio_monitoring \"cert\") }}'\n"
+ " - \"../tls-components/sylva-ca\"\n"
+ " - \"../kyverno-policies/minio-policies/minio-tenant-policies\"\n\n"
+ " minio-monitoring:\n"
+ " info:\n"
+ " description: creates a MinIO tenant for the monitoring stack, used as S3 storage by Thanos\n"
+ " maturity: beta\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"thanos\" | include \"unit-enabled\" }}'\n"
+ " # only enable when no custom objstoreConfig is provided\n"
+ " - '{{ .Values.monitoring.thanos.objstoreConfig.config | empty }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " minio-operator: true\n"
+ " minio-monitoring-init: true\n"
+ " ingress-nginx: '{{ tuple . \"ingress-nginx\" | include \"unit-enabled\" }}'\n"
+ " two-replicas-storageclass: '{{ tuple . \"two-replicas-storageclass\" | include \"unit-enabled\" }}'\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': '{{ not (tuple . \"two-replicas-storageclass\" | include \"unit-enabled\") }}'\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"MinIO monitoring tenant console can be reached at https://{{ .Values.external_hostnames.minio_monitoring_console }} ({{ .Values._internal.display_external_ip_msg }})\"\n"
+ " repo: minio-operator\n"
+ " helmrelease_spec:\n"
+ " targetNamespace: minio-monitoring\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: helm/tenant\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " values:\n"
+ " tenant:\n"
+ " name: monitoring\n"
+ " serviceMetadata:\n"
+ " minioServiceLabels:\n"
+ " v1.min.io/tenant: monitoring\n"
+ " configSecret:\n"
+ " existingSecret: true\n"
+ " name: minio-monitoring-root\n"
+ " accessKey: \"\"\n"
+ " secretKey: \"\"\n"
+ " # Use certificate generated by cert-manager in minio-logging-init instead of relyin on minio operator\n"
+ " certificate:\n"
+ " requestAutoCert: false\n"
+ " externalCertSecret:\n"
+ " - name: minio-monitoring-internal-tls\n"
+ " type: kubernetes.io/tls\n"
+ " # The Kubernetes secret name that contains MinIO environment variable configurations:\n"
+ " configuration:\n"
+ " name: minio-monitoring-root\n"
+ " # Array of Kubernetes secrets from which the Operator generates MinIO users during tenant provisioning:\n"
+ " users:\n"
+ " - name: minio-monitoring-user\n"
+ " pools:\n"
+ " - servers: '{{ min .Values._internal.node_count 4 }}'\n"
+ " name: pool-0\n"
+ " volumesPerserver: 4\n"
+ " size: 10Gi\n"
+ " storageClassName: '{{ tuple . (tuple . \"two-replicas-storageclass\" | include \"unit-enabled\") \"two-replicas-storageclass\" .Values._internal.default_storage_class | include \"interpret-ternary\" }}'\n"
+ " containerSecurityContext:\n"
+ " runAsUser: 1000\n"
+ " runAsGroup: 1000\n"
+ " runAsNonRoot: true\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " securityContext:\n"
+ " runAsUser: 1000\n"
+ " runAsGroup: 1000\n"
+ " fsGroup: 1000\n"
+ " fsGroupChangePolicy: \"OnRootMismatch\"\n"
+ " runAsNonRoot: true\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " resources:\n"
+ " limits:\n"
+ " cpu: 500m\n"
+ " memory: '{{ has .Values.env_type (list \"dev\" \"ci\") | ternary \"512Mi\" \"2Gi\" }}'\n"
+ " requests:\n"
+ " cpu: 100m\n"
+ " memory: '{{ has .Values.env_type (list \"dev\" \"ci\") | ternary \"128Mi\" \"1Gi\" }}'\n"
+ " affinity:\n"
+ " podAntiAffinity:\n"
+ " requiredDuringSchedulingIgnoredDuringExecution:\n"
+ " - labelSelector:\n"
+ " matchExpressions:\n"
+ " - key: v1.min.io/pool\n"
+ " operator: In\n"
+ " values:\n"
+ " - pool-0\n"
+ " - key: v1.min.io/tenant\n"
+ " operator: In\n"
+ " values:\n"
+ " - monitoring\n"
+ " topologyKey: \"kubernetes.io/hostname\"\n"
+ " metrics:\n"
+ " enabled: true\n"
+ " env:\n"
+ " - name: MINIO_PROMETHEUS_AUTH_TYPE\n"
+ " value: public\n"
+ " features:\n"
+ " bucketDNS: false\n"
+ " enableSFTP: false\n"
+ " buckets:\n"
+ " - name: \"thanos\"\n"
+ " region: \"monitoring-cluster\"\n"
+ " objectLock: false\n"
+ " prometheusOperator: false # Prometheus Operator's Service Monitor for MinIO Tenant Pods\n"
+ " logging:\n"
+ " anonymous: true\n"
+ " json: true\n"
+ " quiet: true\n"
+ " ingress:\n"
+ " api:\n"
+ " enabled: true\n"
+ " ingressClassName: nginx\n"
+ " annotations:\n"
+ " nginx.ingress.kubernetes.io/backend-protocol: \"HTTPS\"\n"
+ " host: '{{ .Values.external_hostnames.minio_monitoring }}'\n"
+ " path: /\n"
+ " pathType: Prefix\n"
+ " tls:\n"
+ " - hosts:\n"
+ " - '{{ .Values.external_hostnames.minio_monitoring }}'\n"
+ " secretName: minio-monitoring-tls\n"
+ " console:\n"
+ " enabled: true\n"
+ " ingressClassName: nginx\n"
+ " annotations:\n"
+ " nginx.ingress.kubernetes.io/backend-protocol: \"HTTPS\"\n"
+ " host: '{{ .Values.external_hostnames.minio_monitoring_console }}'\n"
+ " path: /\n"
+ " pathType: Prefix\n"
+ " tls:\n"
+ " - hosts:\n"
+ " - '{{ .Values.external_hostnames.minio_monitoring_console }}'\n"
+ " secretName: minio-monitoring-tls\n"
+ " kustomization_spec:\n"
+ " healthChecks:\n"
+ " - apiVersion: minio.min.io/v2\n"
+ " kind: Tenant\n"
+ " name: monitoring\n"
+ " namespace: minio-monitoring\n"
+ " healthCheckExprs: >-\n"
+ " {{\n"
+ " tuple (list (dict\n"
+ " \"apiVersion\" \"minio.min.io/v2\"\n"
+ " \"kind\" \"Tenant\"\n"
+ " \"current\" \"status.healthStatus == \\\"green\\\"\"\n"
+ " \"failed\" \"status.healthStatus != \\\"green\\\"\"\n"
+ " )\n"
+ " )\n"
+ " (lookup\n"
+ " \"apiextensions.k8s.io/v1\"\n"
+ " \"CustomResourceDefinition\"\n"
+ " \"\"\n"
+ " \"kustomizations.kustomize.toolkit.fluxcd.io\"\n"
+ " | toYaml\n"
+ " | contains \"HealthCheckExprs\"\n"
+ " )\n"
+ " | include \"set-only-if\"\n"
+ " }}\n\n"
+ " thanos-init:\n"
+ " info:\n"
+ " description: sets up thanos certificate\n"
+ " details: it generates a multiple CN certificate for all Thanos components\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"thanos\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " eso-secret-stores: true\n"
+ " minio-monitoring-init: '{{ .Values.monitoring.thanos.objstoreConfig.config | empty }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: '{{ if .Values.monitoring.thanos.objstoreConfig.config | empty }}./kustomize-units/thanos-init{{ else }}./kustomize-units/thanos-dummy{{ end }}'\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " SERVICE: thanos\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.thanos }}'\n"
+ " CERTIFICATE_NAMESPACE: thanos\n"
+ " THANOS_RECEIVE_DNS: '{{ .Values.external_hostnames.thanos_receive }}'\n"
+ " THANOS_STOREGATEWAY_DNS: '{{ .Values.external_hostnames.thanos_storegateway }}'\n"
+ " THANOS_QUERY_DNS: '{{ .Values.external_hostnames.thanos_query }}'\n"
+ " THANOS_BUCKETWEB_DNS: '{{ .Values.external_hostnames.thanos_bucketweb }}'\n"
+ " CERT: '{{ .Values.external_certificates.thanos.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " _patches:\n"
+ " - target:\n"
+ " kind: Certificate\n"
+ " patch: |\n"
+ " - op: add\n"
+ " path: /spec/dnsNames/-\n"
+ " value: ${THANOS_RECEIVE_DNS}\n"
+ " - op: add\n"
+ " path: /spec/dnsNames/-\n"
+ " value: ${THANOS_STOREGATEWAY_DNS}\n"
+ " - op: add\n"
+ " path: /spec/dnsNames/-\n"
+ " value: ${THANOS_QUERY_DNS}\n"
+ " - op: add\n"
+ " path: /spec/dnsNames/-\n"
+ " value: ${THANOS_BUCKETWEB_DNS}\n"
+ " _components:\n"
+ " - '{{ ternary \"../tls-components/tls-secret\" \"../tls-components/tls-certificate\" (hasKey .Values.external_certificates.thanos \"cert\") }}'\n"
+ " - \"../tls-components/sylva-ca\"\n\n"
+ " thanos-credentials-secret:\n"
+ " info:\n"
+ " description: creates a secret containing the tenant's Thanos credentials\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"thanos\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " kyverno-policies-ready: true\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " PASSWORD: '{{ .Values._internal.thanos_password }}'\n"
+ " kustomization_spec:\n"
+ " targetNamespace: '{{ .Release.Namespace }}'\n"
+ " path: ./kustomize-units/credentials-secret\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " LABEL_NAME: sylva.io/aggregated-secret\n"
+ " LABEL_VALUE: thanos\n"
+ " SECRET_NAME: thanos-credentials\n"
+ " USERNAME: '{{ .Values._internal.thanos_user }}'\n"
+ " healthChecks: # ensure that kyverno has produced the secret\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " namespace: sylva-system\n"
+ " name: thanos-secrets\n\n"
+ " thanos-statefulsets-cleanup:\n"
+ " info:\n"
+ " description: Remove Thanos statefulsets from previous deployments\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - kube-job\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"thanos\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " thanos-init: true\n"
+ " thanos-credentials-secret: true\n"
+ " kustomization_spec:\n"
+ " targetNamespace: thanos\n"
+ " _patches:\n"
+ " - '{{ tuple . \"units.thanos.helmrelease_spec.values\" | include \"interpret\" -}}\n"
+ " {{ include \"kube-job-add-env-var-patch\" (dict\n"
+ " \"EXPECTED_HASH_receive\" (.Values.units.thanos.helmrelease_spec.values.receive.persistence | toJson | sha256sum | trunc 8)\n"
+ " \"EXPECTED_HASH_storegateway\" (.Values.units.thanos.helmrelease_spec.values.storegateway.persistence | toJson | sha256sum | trunc 8)\n"
+ " \"EXPECTED_HASH_ruler\" (.Values.units.thanos.helmrelease_spec.values.ruler.persistence | toJson | sha256sum | trunc 8)\n"
+ " )}}'\n"
+ " - '{{ include \"kube-job-replace-script-patch\" (.Files.Get \"scripts/thanos-delete-statefulsets.sh\") }}'\n\n"
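// The cleanup job above compares the expected hash of each Thanos persistence
// config (the EXPECTED_HASH_receive/storegateway/ruler env vars) against the
// `thanos.persistent-config-hash` annotations that the thanos unit's
// _postRenderers attach to the StatefulSets further below; a mismatch
// presumably tells the script to delete the StatefulSet so it can be recreated
// with the new volume settings. A rough Java equivalent of the
// `toJson | sha256sum | trunc 8` hash used on both sides (a sketch only; the
// JSON value is hypothetical and exception handling is elided):
//
//   String json = "{\"enabled\":true,\"size\":\"10Gi\"}";
//   byte[] digest = java.security.MessageDigest.getInstance("SHA-256")
//       .digest(json.getBytes(java.nio.charset.StandardCharsets.UTF_8));
//   StringBuilder hex = new StringBuilder();
//   for (byte b : digest) hex.append(String.format("%02x", b));
//   String hash = hex.substring(0, 8); // value compared against the annotation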
+ " thanos:\n"
+ " info:\n"
+ " description: installs Thanos\n"
+ " maturity: beta\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " thanos-init: true\n"
+ " thanos-credentials-secret: true\n"
+ " thanos-statefulsets-cleanup: '{{ tuple . \"thanos-statefulsets-cleanup\" | include \"unit-enabled\" }}'\n"
+ " minio-monitoring: '{{ .Values.monitoring.thanos.objstoreConfig.config | empty }}'\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': true\n"
+ " ingress-nginx: '{{ tuple . \"ingress-nginx\" | include \"unit-enabled\" }}'\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"Thanos UIs can be reached at https://{{ .Values.external_hostnames.thanos }} and https://{{ .Values.external_hostnames.thanos_bucketweb }} ({{ .Values.external_hostnames.thanos }} and {{ .Values.external_hostnames.thanos_bucketweb }} must resolve to {{ .Values.display_external_ip }})\"\n"
+ " repo: bitnami-thanos\n"
+ " helmrelease_spec:\n"
+ " driftDetection:\n"
+ " ignore:\n"
+ " - target:\n"
+ " # The thanos-compactor replica count is managed by Kyverno policies (kustomize-units/kyverno-policies/generic/components/thanos),\n"
+ " # so drift detection should ignore it.\n"
+ " kind: Deployment\n"
+ " name: thanos-compactor\n"
+ " paths:\n"
+ " - /spec/replicas\n"
+ " targetNamespace: thanos\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: bitnami/thanos\n"
+ " values:\n"
+ " image:\n"
+ " registry: quay.io\n"
+ " repository: thanos/thanos\n"
+ " tag: v0.39.2\n"
+ " volumePermissions:\n"
+ " image:\n"
+ " repository: bitnamilegacy/os-shell\n"
+ " fullnameOverride: \"thanos\"\n"
+ " metrics:\n"
+ " enabled: true\n"
+ " serviceMonitor:\n"
+ " enabled: true\n"
+ " extraParameters:\n"
+ " basicAuth:\n"
+ " password:\n"
+ " name: thanos-basic-auth\n"
+ " key: password\n"
+ " username:\n"
+ " name: thanos-basic-auth\n"
+ " key: username\n"
+ " ruler:\n"
+ " enabled: true\n"
+ " resources:\n"
+ " requests:\n"
+ " cpu: 0.1\n"
+ " memory: 128Mi\n"
+ " limits:\n"
+ " cpu: 0.5\n"
+ " memory: 512Mi\n"
+ " dnsDiscovery:\n"
+ " enabled: false\n"
+ " extraFlags:\n"
+ " - --query.config-file=/etc/thanos/ruler-queries.yaml\n"
+ " - --label=platform_tag=\"{{ .Values.monitoring.platform_tag }}\"\n"
+ " - --enable-auto-gomemlimit\n"
+ " - --tsdb.retention=12h\n"
+ " - --tsdb.wal-compression\n"
+ " extraVolumes:\n"
+ " - name: thanos-ruler-queries-config\n"
+ " secret:\n"
+ " secretName: thanos-ruler-queries\n"
+ " extraVolumeMounts:\n"
+ " - name: thanos-ruler-queries-config\n"
+ " mountPath: /etc/thanos/ruler-queries.yaml\n"
+ " subPath: ruler-queries.yaml\n"
+ " alertmanagersConfig: |\n"
+ " alertmanagers:\n"
+ " - scheme: http\n"
+ " api_version: v2\n"
+ " timeout: 10s\n"
+ " static_configs:\n"
+ " {{- $count_am := .Values.units.monitoring.helmrelease_spec.values.alertmanager.alertmanagerSpec.replicas -}}\n"
+ " {{- if kindIs \"string\" $count_am -}}\n"
+ " {{- $count_am = tuple . $count_am | include \"interpret-as-string\" -}}\n"
+ " {{- end -}}\n"
+ " {{- $count_am = int $count_am -}}\n"
+ " {{- range $index_am := until $count_am }}\n"
+ " - alertmanager-rancher-monitoring-alertmanager-{{ $index_am }}.alertmanager-operated.cattle-monitoring-system:9093\n"
+ " {{- end -}}\n"
+ " existingConfigmap: \"sylva-thanos-rules-configmap\" # this ConfigMap is a byproduct of the sylva-thanos-rules unit\n"
+ " sidecars:\n"
+ " # reload thanos-ruler when changes to rule files or alertmanager\n"
+ " # list are detected\n"
+ " - name: configmap-reload\n"
+ " image: bitnamilegacy/configmap-reload:0.15.0\n"
+ " args:\n"
+ " - --volume-dir=/conf/rules/\n"
+ " - --volume-dir=/conf/alertmanagers/\n"
+ " - --webhook-url=http://{{ .Values._internal.thanos_user }}:{{ .Values._internal.thanos_password }}@localhost:10902/-/reload\n"
+ " volumeMounts:\n"
+ " # volume from \"existingConfigmap\"\n"
+ " - name: ruler-config\n"
+ " mountPath: /conf/rules\n"
+ " - name: alertmanagers-config\n"
+ " mountPath: /conf/alertmanagers\n"
+ " volumes:\n"
+ " - name: alertmanagers-config\n"
+ " secret:\n"
+ " secretName: thanos-ruler-alertmanagers-config\n"
+ " securityContext:\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " privileged: false\n"
+ " runAsNonRoot: true\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " resources:\n"
+ " requests:\n"
+ " memory: 64Mi\n"
+ " cpu: 100m\n"
+ " limits:\n"
+ " memory: 128Mi\n"
+ " cpu: 250m\n"
+ " persistence:\n"
+ " enabled: true\n"
+ " storageClass: '{{ tuple . (tuple . \"two-replicas-storageclass\" | include \"unit-enabled\") \"two-replicas-storageclass\" .Values._internal.default_storage_class | include \"interpret-ternary\" }}'\n"
+ " accessModes: [\"ReadWriteOnce\"]\n"
+ " size: '{{ .Values.monitoring.thanos.ruler.persistence.size }}'\n"
+ " evalInterval: 1m\n"
+ " replicaCount: '{{ .Values._internal.ha_cluster.is_ha | ternary 2 1 | include \"preserve-type\" }}'\n"
+ " pdb:\n"
+ " create: true\n"
+ " minAvailable: '{{ .Values._internal.ha_cluster.is_ha | ternary 1 0 | include \"preserve-type\" }}'\n"
+ " service:\n"
+ " clusterIP: None\n"
+ " query:\n"
+ " logLevel: info\n"
+ " extraFlags:\n"
+ " - --query.auto-downsampling\n"
+ " - --query.replica-label=prometheus_replica\n"
+ " - --query.replica-label=prometheus\n"
+ " - --enable-auto-gomemlimit\n"
+ " - --query.max-concurrent=20\n"
+ " - --query.max-concurrent-select=4\n"
+ " resources:\n"
+ " requests:\n"
+ " cpu: 0.1\n"
+ " memory: 256Mi\n"
+ " limits:\n"
+ " cpu: 0.5\n"
+ " memory: 512Mi\n"
+ " ingress:\n"
+ " enabled: true\n"
+ " ingressClassName: \"nginx\"\n"
+ " hostname: '{{ .Values.external_hostnames.thanos_query }}'\n"
+ " extraTls:\n"
+ " - hosts:\n"
+ " - '{{ .Values.external_hostnames.thanos_query }}'\n"
+ " secretName: thanos-tls\n"
+ " queryFrontend:\n"
+ " logLevel: info\n"
+ " extraFlags:\n"
+ " - --query-frontend.forward-header=Authorization\n"
+ " - --cache-compression-type=snappy\n"
+ " - --query-range.split-interval=6h\n"
+ " - --enable-auto-gomemlimit\n"
+ " resources:\n"
+ " requests:\n"
+ " cpu: 0.1\n"
+ " memory: 512Mi\n"
+ " limits:\n"
+ " cpu: 0.5\n"
+ " memory: 1.5Gi\n"
+ " config: |-\n"
+ " type: IN-MEMORY\n"
+ " config:\n"
+ " max_size: 1GB\n"
+ " max_size_items: 0\n"
+ " validity: 0s\n"
+ " ingress:\n"
+ " enabled: true\n"
+ " ingressClassName: \"nginx\"\n"
+ " hostname: '{{ .Values.external_hostnames.thanos }}'\n"
+ " extraTls:\n"
+ " - hosts:\n"
+ " - '{{ .Values.external_hostnames.thanos }}'\n"
+ " secretName: thanos-tls\n"
+ " storegateway:\n"
+ " enabled: true\n"
+ " logLevel: info\n"
+ " extraFlags:\n"
+ " - --index-cache-size=1.5GB\n"
+ " - --enable-auto-gomemlimit\n"
+ " - --sync-block-duration=3m\n"
+ " persistence:\n"
+ " enabled: true\n"
+ " storageClass: '{{ tuple . (tuple . \"two-replicas-storageclass\" | include \"unit-enabled\") \"two-replicas-storageclass\" .Values._internal.default_storage_class | include \"interpret-ternary\" }}'\n"
+ " accessModes: [\"ReadWriteOnce\"]\n"
+ " size: '{{ .Values.monitoring.thanos.storegateway.persistence.size }}'\n"
+ " ingress:\n"
+ " enabled: true\n"
+ " hostname: '{{ .Values.external_hostnames.thanos_storegateway }}'\n"
+ " ingressClassName: \"nginx\"\n"
+ " extraTls:\n"
+ " - hosts:\n"
+ " - '{{ .Values.external_hostnames.thanos_storegateway }}'\n"
+ " secretName: thanos-tls\n"
+ " resources:\n"
+ " requests:\n"
+ " cpu: 0.1\n"
+ " memory: 1.5Gi\n"
+ " limits:\n"
+ " cpu: 0.5\n"
+ " memory: 2Gi\n"
+ " compactor:\n"
+ " persistence:\n"
+ " enabled: true\n"
+ " storageClass: '{{ tuple . (tuple . \"two-replicas-storageclass\" | include \"unit-enabled\") \"two-replicas-storageclass\" .Values._internal.default_storage_class | include \"interpret-ternary\" }}'\n"
+ " accessModes: [\"ReadWriteOnce\"]\n"
+ " size: '{{ .Values.monitoring.thanos.compactor.persistence.size }}'\n"
+ " enabled: true\n"
+ " logLevel: info\n"
+ " extraFlags:\n"
+ " - --deduplication.replica-label=\"prometheus_replica\"\n"
+ " - --delete-delay=12h\n"
+ " consistencyDelay: 30m\n"
+ " resources:\n"
+ " requests:\n"
+ " cpu: 0.5\n"
+ " memory: 512Mi\n"
+ " limits:\n"
+ " cpu: 1\n"
+ " memory: 1024Mi\n"
+ " retentionResolutionRaw: 2d\n"
+ " retentionResolution5m: 15d\n"
+ " retentionResolution1h: 0d # disable 1h resolution retention\n"
+ " receive:\n"
+ " enabled: true\n"
+ " logLevel: info\n"
+ " resources:\n"
+ " requests:\n"
+ " cpu: 0.5\n"
+ " memory: 4Gi\n"
+ " limits:\n"
+ " cpu: 1\n"
+ " memory: 6Gi\n"
+ " persistence:\n"
+ " enabled: true\n"
+ " storageClass: '{{ tuple . (tuple . \"two-replicas-storageclass\" | include \"unit-enabled\") \"two-replicas-storageclass\" .Values._internal.default_storage_class | include \"interpret-ternary\" }}'\n"
+ " accessModes: [\"ReadWriteOnce\"]\n"
+ " size: '{{ .Values.monitoring.thanos.receive.persistence.size }}'\n"
+ " tsdbRetention: 12h\n"
+ " extraFlags:\n"
+ " - --enable-auto-gomemlimit\n"
+ " - --tsdb.max-retention-bytes=12GB\n"
+ " - --tsdb.wal-compression\n"
+ " - --tsdb.min-block-duration=1h # flush to object storage faster\n"
+ " - --tsdb.max-block-duration=1h # flush to object storage faster\n"
+ " ingress:\n"
+ " enabled: true\n"
+ " ingressClassName: \"nginx\"\n"
+ " annotations:\n"
+ " nginx.ingress.kubernetes.io/client-body-buffer-size: 1m\n"
+ " hostname: '{{ .Values.external_hostnames.thanos_receive }}'\n"
+ " extraTls:\n"
+ " - hosts:\n"
+ " - '{{ .Values.external_hostnames.thanos_receive }}'\n"
+ " secretName: thanos-tls\n"
+ " bucketweb:\n"
+ " enabled: true\n"
+ " logLevel: info\n"
+ " resources:\n"
+ " requests:\n"
+ " cpu: 0.1\n"
+ " memory: 128Mi\n"
+ " limits:\n"
+ " cpu: 0.5\n"
+ " memory: 256Mi\n"
+ " ingress:\n"
+ " enabled: true\n"
+ " hostname: '{{ .Values.external_hostnames.thanos_bucketweb }}'\n"
+ " ingressClassName: \"nginx\"\n"
+ " extraTls:\n"
+ " - hosts:\n"
+ " - '{{ .Values.external_hostnames.thanos_bucketweb }}'\n"
+ " secretName: thanos-tls\n"
+ " valuesFrom:\n"
+ " # use values from thanos-minio-user only when using internal MinIO storage\n"
+ " - >-\n"
+ " {{ tuple (dict\n"
+ " \"kind\" \"Secret\"\n"
+ " \"name\" \"thanos-minio-user\"\n"
+ " \"valuesKey\" \"CONSOLE_ACCESS_KEY\"\n"
+ " \"targetPath\" \"objstoreConfig.config.access_key\"\n"
+ " \"optional\" true\n"
+ " )\n"
+ " (.Values.monitoring.thanos.objstoreConfig.config | empty)\n"
+ " | include \"set-only-if\"\n"
+ " }}\n"
+ " - >-\n"
+ " {{ tuple (dict\n"
+ " \"kind\" \"Secret\"\n"
+ " \"name\" \"thanos-minio-user\"\n"
+ " \"valuesKey\" \"CONSOLE_SECRET_KEY\"\n"
+ " \"targetPath\" \"objstoreConfig.config.secret_key\"\n"
+ " \"optional\" true\n"
+ " )\n"
+ " (.Values.monitoring.thanos.objstoreConfig.config | empty)\n"
+ " | include \"set-only-if\"\n"
+ " }}\n"
+ " # Use the tenant list built by the kyverno policy thanos-aggregated-secret to fill the auth.basicAuthUsers object\n"
+ " - kind: Secret\n"
+ " name: thanos-secrets\n"
+ " valuesKey: secrets\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - '{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include \"preserve-type\" }}'\n"
+ " # add the current persistence config as a StatefulSet annotation so we know if we need to delete it\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - target:\n"
+ " kind: StatefulSet\n"
+ " name: thanos-receive\n"
+ " patch: |\n"
+ " - op: add\n"
+ " path: /metadata/annotations/thanos.persistent-config-hash\n"
+ " value: |-\n"
+ " {{ tuple . \"units.thanos.helmrelease_spec.values.receive.persistence\" | include \"interpret\" -}}\n"
+ " {{ .Values.units.thanos.helmrelease_spec.values.receive.persistence | toJson | sha256sum | trunc 8 }}\n"
+ " - target:\n"
+ " kind: StatefulSet\n"
+ " name: thanos-storegateway\n"
+ " patch: |\n"
+ " - op: add\n"
+ " path: /metadata/annotations/thanos.persistent-config-hash\n"
+ " value: |-\n"
+ " {{ tuple . \"units.thanos.helmrelease_spec.values.storegateway.persistence\" | include \"interpret\" -}}\n"
+ " {{ .Values.units.thanos.helmrelease_spec.values.storegateway.persistence | toJson | sha256sum | trunc 8 }}\n"
+ " - target:\n"
+ " kind: StatefulSet\n"
+ " name: thanos-ruler\n"
+ " patch: |\n"
+ " - op: add\n"
+ " path: /metadata/annotations/thanos.persistent-config-hash\n"
+ " value: |-\n"
+ " {{ tuple . \"units.thanos.helmrelease_spec.values.ruler.persistence\" | include \"interpret\" -}}\n"
+ " {{ .Values.units.thanos.helmrelease_spec.values.ruler.persistence | toJson | sha256sum | trunc 8 }}\n"
+ " helm_secret_values:\n"
+ " extraDeploy:\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " metadata:\n"
+ " name: thanos-basic-auth\n"
+ " type: Opaque\n"
+ " data:\n"
+ " username: '{{ .Values._internal.thanos_user | b64enc }}'\n"
+ " password: '{{ .Values._internal.thanos_password | b64enc }}'\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " metadata:\n"
+ " name: thanos-ruler-queries\n"
+ " namespace: thanos\n"
+ " type: Opaque\n"
+ " stringData:\n"
+ " ruler-queries.yaml: |\n"
+ " - http_config:\n"
+ " basic_auth:\n"
+ " username: \"{{ .Values._internal.thanos_user }}\"\n"
+ " password: \"{{ .Values._internal.thanos_password }}\"\n"
+ " tls_config:\n"
+ " insecure_skip_verify: true\n"
+ " static_configs: [\"thanos-query-frontend:9090\"]\n"
+ " scheme: http\n"
+ " objstoreConfig:\n"
+ " '{{ .Values._internal.thanos.objstoreConfig | include \"preserve-type\" }}'\n\n"
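// Reading note for the thanos unit above: expressions wrapped in
// '{{ ... | include "preserve-type" }}' appear to keep their non-string type
// (bool/int/dict) when the template is interpreted, rather than being coerced
// to a string, and "set-only-if"/"interpret-ternary" are companion helpers from
// the same chart (behavior inferred from usage in this file). This is why the
// two valuesFrom entries for thanos-minio-user vanish entirely, rather than
// rendering as empty entries, when an external objstoreConfig is provided.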
+ " logging-crd:\n"
+ " info:\n"
+ " description: install logging-operator CRD\n"
+ " maturity: beta\n"
+ " hidden: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"logging\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " repo: logging-chart-repository\n"
+ " helmrelease_spec:\n"
+ " releaseName: logging-crd\n"
+ " targetNamespace: kube-logging\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: charts/logging-operator/charts/logging-operator-crds\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " kustomization_spec:\n"
+ " prune: false\n\n"
+ " logging:\n"
+ " enabled: false\n"
+ " info:\n"
+ " description: Deploys Fluentbit/Fluentd logging stack from logging-operator chart, for log scraping and shipping\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " logging-crd: true\n"
+ " repo: logging-chart-repository\n"
+ " helmrelease_spec:\n"
+ " targetNamespace: kube-logging\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: charts/logging-operator\n"
+ " values:\n"
+ " logging:\n"
+ " enabled: true\n"
+ " fluentd:\n"
+ " resources:\n"
+ " limits:\n"
+ " memory: 1Gi\n"
+ " requests:\n"
+ " memory: 300M\n"
+ " fluentbit:\n"
+ " customParsers: |\n"
+ " [PARSER]\n"
+ " Name crilog\n"
+ " Format regex\n"
+ " Regex ^(?<time>[^ ]+) (?<stream>stdout|stderr) (?<logtag>[^ ]+) (?<log>.*)$\n"
+ " Time_Key time\n"
+ " Time_Format %Y-%m-%dT%H:%M:%S.%L%z\n"
+ " Time_Keep \"On\"\n"
+ " inputTail:\n"
+ " Parser: crilog\n"
+ " Path_Key: \"log_file\"\n"
+ " image:\n"
+ " repository: ghcr.io/kube-logging/logging-operator\n"
+ " tag: '{{ .Values.source_templates | dig \"logging-chart-repository\" \"spec\" \"ref\" \"tag\" \"error, source_templates.logging-chart-repository has no spec.ref.tag\" }}'\n"
+ " securityContext:\n"
+ " readOnlyRootFilesystem: true\n"
+ " allowPrivilegeEscalation: false\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " capabilities:\n"
+ " drop: [ \"ALL\" ]\n\n"
+ " logging-config:\n"
+ " info:\n"
+ " description: Configures logging unit to ship logs to Loki\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"logging\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " logging: true\n"
+ " #loki: this dependency is handled in a different way, in management.values.yaml and workload-cluster.values.yaml\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " LOKI_USERNAME: '{{ .Values._internal.loki_user }}'\n"
+ " LOKI_PASSWORD: '{{ .Values._internal.loki_password }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/kube-logging\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " # we must use the external name so that this also works for workload clusters\n"
+ " LOKI_URL: '{{ .Values.logging.loki_url }}'\n"
+ " CLUSTER_NAME: '{{ .Values.cluster.name }}'\n"
+ " PATH: /var/log/journal\n"
+ " CHUNK_LIMIT_SIZE: 8m\n"
+ " FLUSH_THREAD_COUNT: \"8\"\n"
+ " FLUSH_INTERVAL: 2s\n"
+ " FLUSH_MODE: interval\n"
+ " wait: true\n"
+ " _components:\n"
+ " - '{{ tuple \"components/host-tailer-rke2\" (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" ) | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/host-tailer-kubeadm\" (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpk\" ) | include \"set-only-if\" }}'\n\n"
+ " sylva-logging-flows:\n"
+ " info:\n"
+ " description: configures logging flows and output to export the platform logs to an external server\n"
+ " maturity: beta\n"
+ " internal: false\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled: false\n"
+ " depends_on:\n"
+ " logging: true\n"
+ " repo: sylva-logging-flows\n"
+ " helm_chart_artifact_name: sylva-logging-flows\n"
+ " # flow, clusterflows, outputs and clusteroutputs values are documented in the sylva-logging-flow helmchart https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-logging-flows\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: .\n"
+ " targetNamespace: kube-logging\n"
+ " values:\n"
+ " flows: '{{ .Values.logging.flows | include \"preserve-type\" }}'\n"
+ " clusterflows: '{{ .Values.logging.clusterflows | include \"preserve-type\" }}'\n"
+ " helm_secret_values:\n"
+ " outputs: '{{ .Values.logging.outputs | include \"preserve-type\" }}'\n"
+ " clusteroutputs: '{{ .Values.logging.clusteroutputs | include \"preserve-type\" }}'\n\n"
+ " loki-init:\n"
+ " info:\n"
+ " description: sets up the Loki certificate\n"
+ " details: it generates a certificate\n"
+ " internal: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"loki\" | include \"unit-enabled\" }}'\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " sylva-ca: true\n"
+ " external-secrets-operator: true\n"
+ " eso-secret-stores: true\n"
+ " minio-logging-init: true\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " KEY: '{{ .Values.external_certificates.loki.key | default \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/loki-init\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " SERVICE: loki\n"
+ " SERVICE_DNS: '{{ .Values.external_hostnames.loki }}'\n"
+ " CERTIFICATE_NAMESPACE: loki\n"
+ " CERT: '{{ .Values.external_certificates.loki.cert | default \"\" | b64enc }}'\n"
+ " CACERT: '{{ .Values.external_certificates.cacert | default \"\" | b64enc }}'\n"
+ " SECRET_PATH: '{{ .Values.security.vault.paths.secret }}'\n"
+ " _components:\n"
+ " - '{{ ternary \"../tls-components/tls-secret\" \"../tls-components/tls-certificate\" (hasKey .Values.external_certificates.loki \"cert\") }}'\n"
+ " - \"../tls-components/sylva-ca\"\n\n"
+ " loki-credentials-secret:\n"
+ " info:\n"
+ " description: creates a secret containing the tenant's Loki credentials\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " kyverno-policies-ready: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"loki\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " PASSWORD: '{{ .Values._internal.loki_password }}'\n"
+ " kustomization_spec:\n"
+ " targetNamespace: '{{ .Release.Namespace }}'\n"
+ " path: ./kustomize-units/credentials-secret\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " LABEL_NAME: sylva.io/aggregated-secret\n"
+ " LABEL_VALUE: loki\n"
+ " SECRET_NAME: loki-credentials\n"
+ " USERNAME: '{{ .Values._internal.loki_user }}'\n"
+ " healthChecks:\n"
+ " # Ensure that loki-aggregated-secret Kyverno ClusterPolicy has produced the secret\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " namespace: sylva-system\n"
+ " name: loki-secrets\n"
+ " healthCheckExprs:\n"
+ " # Check that loki-secrets.data.secrets contains at least a tenant definition,\n"
+ " # considering that base64 encoded value of '{\"loki\":{\"tenants\":[]}}' is 33 characters long\n"
+ " - apiVersion: v1\n"
+ " kind: Secret\n"
+ " current: size(data.?secrets.orValue(\"\")) > 33\n"
+ " failed: size(data.?secrets.orValue(\"\")) <= 33\n\n"
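// The healthCheckExprs entries above use Flux's CEL-based health checks; the
// `data.?secrets.orValue("")` form is CEL's optional-value syntax, yielding ""
// when the `secrets` key is absent so that size() can be compared safely. The
// 33-character floor corresponds to the base64 length of the empty tenant list
// described in the comment above.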
+ " loki:\n"
+ " info:\n"
+ " description: installs Loki log storage\n"
+ " details: installs Loki log storage in simple scalable mode\n"
+ " maturity: beta\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " namespace-defs: true\n"
+ " loki-init: true\n"
+ " loki-credentials-secret: true\n"
+ " minio-logging: true\n"
+ " logging-crd: '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"logging\" | include \"unit-enabled\" }}'\n"
+ " annotations:\n"
+ " sylvactl/readyMessage: \"Loki can be reached at https://{{ .Values.external_hostnames.loki }} ({{ .Values._internal.display_external_ip_msg }})\"\n"
+ " helm_repo_url: oci://ghcr.io/grafana/helm-charts\n"
+ " helmrelease_spec:\n"
+ " driftDetection:\n"
+ " ignore:\n"
+ " - paths:\n"
+ " - /metadata/annotations/prometheus-operator-validated\n"
+ " target:\n"
+ " group: monitoring.coreos.com\n"
+ " kind: PrometheusRule\n"
+ " targetNamespace: loki\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: loki\n"
+ " version: 6.41.1\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " values:\n"
+ " global:\n"
+ " dnsService: '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" | ternary \"rke2-coredns-rke2-coredns\" \"kube-dns\" }}'\n"
+ " loki:\n"
+ " analytics:\n"
+ " reporting_enabled: false\n"
+ " containerSecurityContext:\n"
+ " allowPrivilegeEscalation: false\n"
+ " capabilities:\n"
+ " drop:\n"
+ " - ALL\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " server:\n"
+ " grpc_server_max_recv_msg_size: 52428800\n"
+ " grpc_server_max_send_msg_size: 52428800\n"
+ " compactor:\n"
+ " compaction_interval: 1h\n"
+ " retention_enabled: true\n"
+ " retention_delete_delay: 2h\n"
+ " retention_delete_worker_count: 150\n"
+ " delete_request_store: s3\n"
+ " limits_config:\n"
+ " per_stream_rate_limit: 10MB\n"
+ " ingestion_rate_strategy: local\n"
+ " ingestion_rate_mb: 100\n"
+ " ingestion_burst_size_mb: 100\n"
+ " retention_period: 72h\n"
+ " reject_old_samples: false\n"
+ " unordered_writes: true\n"
+ " allow_structured_metadata: false\n"
+ " ingester:\n"
+ " wal:\n"
+ " enabled: true\n"
+ " dir: /data/wal\n"
+ " schemaConfig:\n"
+ " configs:\n"
+ " - from: \"2022-01-11\"\n"
+ " index:\n"
+ " period: 24h\n"
+ " prefix: loki_index_\n"
+ " object_store: s3\n"
+ " schema: v13\n"
+ " store: tsdb\n"
+ " commonConfig:\n"
+ " replication_factor: 1\n"
+ " storage:\n"
+ " bucketNames:\n"
+ " chunks: \"loki-chunks\"\n"
+ " ruler: \"loki-ruler\"\n"
+ " admin: \"loki-admin\"\n"
+ " s3:\n"
+ " endpoint: \"minio.minio-logging.svc.cluster.local:443\"\n"
+ " s3ForcePathStyle: true\n"
+ " http_config:\n"
+ " insecure_skip_verify: true\n"
+ " storage_class: REDUCED_REDUNDANCY\n"
+ " test:\n"
+ " enabled: false\n"
+ " lokiCanary:\n"
+ " enabled: false\n"
+ " chunksCache:\n"
+ " allocatedMemory: 4096\n"
+ " resources:\n"
+ " limits:\n"
+ " memory: 5Gi\n"
+ " requests:\n"
+ " memory: 500Mi\n"
+ " resultsCache:\n"
+ " allocatedMemory: 512\n"
+ " resources:\n"
+ " limits:\n"
+ " memory: 600Mi\n"
+ " requests:\n"
+ " memory: 60Mi\n"
+ " monitoring:\n"
+ " selfMonitoring:\n"
+ " enabled: false\n"
+ " grafanaAgent:\n"
+ " installOperator: false\n"
+ " serviceMonitor:\n"
+ " enabled: '{{ .Values._internal.monitoring.enabled | include \"preserve-type\" }}'\n"
+ " relabelings:\n"
+ " # drop loki-gateway endpoints from scraping as basicAuth config for serviceMonitor\n"
+ " # is not supported. See: https://github.com/grafana/loki/issues/14141\n"
+ " - sourceLabels:\n"
+ " - job\n"
+ " regex: \"loki/loki-gateway\"\n"
+ " action: drop\n"
+ " dashboards:\n"
+ " enabled: '{{ .Values._internal.monitoring.enabled | include \"preserve-type\" }}'\n"
+ " rules:\n"
+ " enabled: '{{ .Values._internal.monitoring.enabled | include \"preserve-type\" }}'\n"
+ " additionalGroups:\n"
+ " - name: additional-loki-rules\n"
+ " rules:\n"
+ " - record: job:loki_request_duration_seconds_bucket:sum_rate\n"
+ " expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job)\n"
+ " - record: job_route:loki_request_duration_seconds_bucket:sum_rate\n"
+ " expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job, route)\n"
+ " - record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate\n"
+ " expr: sum(rate(container_cpu_usage_seconds_total[1m])) by (node, namespace, pod, container)\n"
+ " write:\n"
+ " replicas: '{{ int .Values._internal.node_count | gt 2 | ternary 1 2 }}'\n"
+ " persistence:\n"
+ " size: 3Gi\n"
+ " autoscaling:\n"
+ " enable: true\n"
+ " minReplicas: 2\n"
+ " maxReplicas: 6\n"
+ " targetCPUUtilizationPercentage: 40\n"
+ " extraVolumes:\n"
+ " - name: data\n"
+ " emptyDir: {}\n"
+ " - name: loki\n"
+ " emptyDir: {}\n"
+ " extraVolumeMounts:\n"
+ " - name: data\n"
+ " mountPath: /data\n"
+ " - name: loki\n"
+ " mountPath: /loki\n"
+ " read:\n"
+ " replicas: '{{ int .Values._internal.node_count | gt 2 | ternary 1 2 }}'\n"
+ " persistence:\n"
+ " size: 3Gi\n"
+ " backend:\n"
+ " replicas: '{{ int .Values._internal.node_count | gt 2 | ternary 1 2 }}'\n"
+ " persistence:\n"
+ " size: 3Gi\n"
+ " memberlist:\n"
+ " service:\n"
+ " publishNotReadyAddresses: true\n"
+ " gateway:\n"
+ " nginxConfig:\n"
+ " clientMaxBodySize: 30M\n"
+ " replicas: '{{ int .Values._internal.node_count | gt 2 | ternary 1 2 }}'\n"
+ " ingress:\n"
+ " enabled: true\n"
+ " ingressClassName: \"nginx\"\n"
+ " annotations:\n"
+ " nginx.ingress.kubernetes.io/proxy-body-size: \"4096m\"\n"
+ " nginx.ingress.kubernetes.io/client-body-buffer-size: 10m\n"
+ " hosts:\n"
+ " - host: '{{ .Values.external_hostnames.loki }}'\n"
+ " paths:\n"
+ " - path: /\n"
+ " pathType: Prefix\n"
+ " tls:\n"
+ " - secretName: loki-tls\n"
+ " hosts:\n"
+ " - '{{ .Values.external_hostnames.loki }}'\n"
+ " basicAuth:\n"
+ " enabled: true\n"
+ " containerSecurityContext:\n"
+ " seccompProfile:\n"
+ " type: RuntimeDefault\n"
+ " kubectlImage:\n"
+ " repository: alpine/kubectl\n"
+ " tag: 1.34.1\n"
+ " valuesFrom:\n"
+ " - kind: Secret\n"
+ " name: loki-minio-user\n"
+ " valuesKey: CONSOLE_ACCESS_KEY\n"
+ " targetPath: loki.storage.s3.accessKeyId\n"
+ " optional: false\n"
+ " - kind: Secret\n"
+ " name: loki-minio-user\n"
+ " valuesKey: CONSOLE_SECRET_KEY\n"
+ " targetPath: loki.storage.s3.secretAccessKey\n"
+ " optional: false\n"
+ " # Use the tenant list built by the kyverno policy loki-aggregated-secret; it will fill the loki.tenant object\n"
+ " - kind: Secret\n"
+ " name: loki-secrets\n"
+ " valuesKey: secrets\n"
+ " _postRenderers:\n"
+ " - kustomize:\n"
+ " patches:\n"
+ " - '{{ .Values._internal.pdb_allow_unhealthy_pod_eviction | include \"preserve-type\" }}'\n\n"
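// Reading note for the replica templates in the loki unit above
// (write/read/backend/gateway): in Go templates a piped value becomes the
// *last* argument of the receiving function, so
// '{{ int .Values._internal.node_count | gt 2 | ternary 1 2 }}' evaluates as
// ternary 1 2 (gt 2 node_count), i.e. it yields 1 replica when 2 > node_count
// (a single-node cluster) and 2 replicas otherwise.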
+ " single-replica-storageclass:\n"
+ " info:\n"
+ " description: Defines a Longhorn storage class with a single replica\n"
+ " internal: true\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " longhorn: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"longhorn\" | include \"unit-enabled\" }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/longhorn-storageclass\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " CLASS_NAME: single-replica-storageclass\n"
+ " _patches:\n"
+ " - target:\n"
+ " kind: StorageClass\n"
+ " patch: |\n"
+ " kind: _unused_\n"
+ " metadata:\n"
+ " name: _unused_\n"
+ " annotations:\n"
+ " storageclass.kubernetes.io/is-default-class: '{{ .Values._internal.default_storage_class | eq \"single-replica-storageclass\" }}'\n"
+ " parameters:\n"
+ " numberOfReplicas: \"1\"\n\n"
+ " two-replicas-storageclass:\n"
+ " info:\n"
+ " description: Defines a Longhorn storage class with two replicas\n"
+ " internal: true\n"
+ " repo: sylva-core\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " longhorn: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"longhorn\" | include \"unit-enabled\" }}'\n"
+ " - '{{ gt (.Values._internal.longhorn_node_count_upper_bound | int) 1 }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/longhorn-storageclass\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " CLASS_NAME: two-replicas-storageclass\n"
+ " _patches:\n"
+ " - target:\n"
+ " kind: StorageClass\n"
+ " patch: |\n"
+ " kind: _unused_\n"
+ " metadata:\n"
+ " name: _unused_\n"
+ " annotations:\n"
+ " storageclass.kubernetes.io/is-default-class: '{{ .Values._internal.default_storage_class | eq \"two-replicas-storageclass\" }}'\n"
+ " parameters:\n"
+ " numberOfReplicas: \"2\"\n\n"
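// In the two StorageClass patches above, `kind: _unused_` and
// `name: _unused_` are placeholders that merely make the patch a syntactically
// complete manifest; because each patch carries a `target` selector, kustomize
// resolves the patched resource from that selector and ignores the placeholder
// identifiers.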
+ " sylva-prometheus-rules:\n"
+ " info:\n"
+ " description: installs prometheus rules using external helm chart & rules git repo\n"
+ " maturity: beta\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " monitoring: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"monitoring\" | include \"unit-enabled\" }}'\n"
+ " repo: sylva-prometheus-rules\n"
+ " helm_chart_artifact_name: sylva-prometheus-rules\n"
+ " helmrelease_spec:\n"
+ " releaseName: sylva-prometheus-rules\n"
+ " targetNamespace: sylva-prometheus-rules\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: .\n"
+ " install:\n"
+ " createNamespace: true\n"
+ " values:\n"
+ " createRules:\n"
+ " allclusters: true\n"
+ " '{{ .Values.cluster.name }}': true\n"
+ " optional_rules: '{{ .Values._internal.monitoring.conditionals | include \"preserve-type\" }}'\n\n"
+ " sylva-thanos-rules:\n"
+ " info:\n"
+ " description: installs Thanos rules using external helm chart & rules git repo\n"
+ " maturity: beta\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " thanos-init: true\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"thanos\" | include \"unit-enabled\" }}'\n"
+ " - '{{ .Values.units.thanos.helmrelease_spec.values.ruler.enabled }}'\n"
+ " repo: sylva-thanos-rules\n"
+ " helm_chart_artifact_name: sylva-thanos-rules\n"
+ " helmrelease_spec:\n"
+ " releaseName: sylva-thanos-rules\n"
+ " targetNamespace: thanos\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: .\n\n"
+ " sync-openstack-images:\n"
+ " info:\n"
+ " description: Automatically pushes OpenStack images to Glance\n"
+ " details: Pushes OS images to Glance, if needed, and retrieves their UUIDs for use in the cluster unit\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " os-images-info: true\n"
+ " annotations:\n"
+ " sylvactl/unitTimeout: '{{ mul .Values.get_openstack_images_per_image_timeout_minutes (include \"generate-os-images\" . | fromYaml | dig \"os_images\" dict | len) }}m'\n"
+ " enabled_conditions:\n"
+ " - '{{ eq .Values.cluster.capi_providers.infra_provider \"capo\" }}'\n"
+ " repo: sync-openstack-images\n"
+ " helm_chart_artifact_name: sync-openstack-images\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: .\n"
+ " targetNamespace: '{{ .Release.Namespace }}'\n"
+ " install:\n"
+ " createNamespace: false\n"
+ " timeout: 60m\n"
+ " valuesFrom:\n"
+ " # os_images:\n"
+ " - kind: ConfigMap\n"
+ " name: os-images-info # by-product of os-images-info unit\n"
+ " values:\n"
+ " downloader:\n"
+ " insecure_client: '{{ .Values.oci_registry_insecure }}'\n"
+ " proxy: '{{ .Values.mgmt_cluster_state_values.proxies.https_proxy }}'\n"
+ " no_proxy: '{{ .Values.mgmt_cluster_state_values.proxies.no_proxy }}'\n"
+ " extra_ca_certs: '{{ tuple (.Values.oci_registry_extra_ca_certs | default \"\") (not .Values.oci_registry_insecure) | include \"set-only-if\" }}'\n"
+ " os_image_selectors: >-\n"
+ " {{- tuple . \"cluster\" | include \"interpret\" -}}\n"
+ " {{ tuple .Values.cluster | include \"find-cluster-image-selectors\" | fromJsonArray | include \"preserve-type\" }}\n"
+ " helm_secret_values:\n"
+ " openstack_clouds_yaml: '{{ .Values.cluster.capo.clouds_yaml | include \"preserve-type\" }}'\n\n"
+ " descheduler:\n"
+ " info:\n"
+ " description: install descheduler\n"
+ " enabled: false\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " helm_repo_url: https://kubernetes-sigs.github.io/descheduler/\n"
+ " helmrelease_spec:\n"
+ " releaseName: descheduler\n"
+ " targetNamespace: kube-system\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: descheduler\n"
+ " version: 0.33.0\n"
+ " values:\n"
+ " kind: CronJob\n"
+ " schedule: \"*/10 * * * *\"\n"
+ " deschedulerPolicy:\n"
+ " profiles:\n"
+ " - name: sylva\n"
+ " pluginConfig:\n"
+ " - name: RemoveDuplicates\n"
+ " - name: LowNodeUtilization\n"
+ " args:\n"
+ " useDeviationThresholds: true\n"
+ " thresholds:\n"
+ " cpu: 20\n"
+ " memory: 20\n"
+ " pods: 20\n"
+ " targetThresholds:\n"
+ " cpu: 60\n"
+ " memory: 60\n"
+ " pods: 50\n"
+ " plugins:\n"
+ " balance:\n"
+ " enabled:\n"
+ " - RemoveDuplicates\n"
+ " - LowNodeUtilization\n\n"
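// With useDeviationThresholds enabled, descheduler interprets `thresholds` and
// `targetThresholds` as deviations from the mean node utilization rather than
// absolute percentages, as described in the upstream descheduler documentation:
// e.g. with mean CPU usage at 50%, nodes under 30% (mean - 20) count as
// underutilized (the worked numbers here are illustrative only).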
+ " workload-team-defs:\n"
+ " info:\n"
+ " description: installs the workload-team-defs chart\n"
+ " details: installs the workload-team-defs chart to deploy workload clusters through CRDs\n"
+ " maturity: experimental\n"
+ " repo: workload-team-defs\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " sylva-units-operator: '{{ tuple . \"sylva-units-operator\" | include \"unit-enabled\" }}'\n"
+ " external-secrets-operator: '{{ tuple . \"sylva-units-operator\" | include \"unit-enabled\" }}'\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"workload-cluster-operator\" | include \"unit-enabled\" }}'\n"
+ " helm_chart_artifact_name: workload-team-defs\n"
+ " helmrelease_spec:\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: .\n"
+ " values:\n"
+ " workload_cluster_teams: '{{ .Values.workload_clusters.teams | include \"preserve-type\" }}'\n"
+ " managed_clusters_settings: '{{ tuple . \"mgmt_cluster_state_values\" | include \"interpret\" }}{{ .Values.mgmt_cluster_state_values | include \"preserve-type\" }}'\n"
+ " workload_cluster_sylva_source: '{{ .Values.workload_clusters.sylva_source | include \"preserve-type\" }}'\n\n"
+ " coredns-custom-hosts-import:\n"
+ " enabled: false\n"
+ " info:\n"
+ " description: creates a ConfigMap containing the workload cluster's DNS A records for the [CoreDNS hosts plugin](https://coredns.io/plugins/hosts/)\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpoa\" }}'\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/coredns-custom-hosts-import\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " NAMESPACE: '{{ .Release.Namespace }}'\n"
+ " CLUSTER_NAME: '{{ .Values.cluster.name }}'\n"
+ " CLUSTER_VIRTUAL_IP: '{{ .Values.cluster_virtual_ip }}'\n"
+ " CLUSTER_OKD_BASE_DOMAIN: '{{ .Values.cluster.openshift.baseDomain }}'\n\n"
+ " backup-capi-resources:\n"
+ " info:\n"
+ " description: Backup Cluster API resources\n"
+ " details: Periodically backs up Cluster API resources using clusterctl move\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - '{{ tuple \"backup-s3\" (hasKey ((.Values.backup | default dict) | dig \"store\" dict) \"s3\") | include \"set-only-if\" }}'\n"
+ " - kube-cronjob\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.backup | ne nil }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/backup\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " ADDITIONAL_RESOURCES: ConfigMap/sylva-units-values Secret/sylva-units-secrets ConfigMap/capo-cluster-resources # space-separated kind/name resources to back up alongside CAPI resources\n"
+ " _components:\n"
+ " - \"./components/backup-capi-resources\"\n"
+ " - '{{ tuple \"./components/pushgateway\" (tuple . \"prometheus-pushgateway\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"./components/timestamp\" ((.Values.backup | default dict) | dig \"timestamped\" false) | include \"set-only-if\" }}'\n"
+ " _patches:\n"
+ " - target:\n"
+ " kind: CronJob\n"
+ " patch: |\n"
+ " - op: replace\n"
+ " path: /spec/schedule\n"
+ " value: \"0 5 * * * \"\n\n"
+ " backup-etcd:\n"
+ " info:\n"
+ " description: Backup etcd\n"
+ " details: Periodically backs up etcd using etcdctl\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " - '{{ tuple \"backup-s3\" (hasKey ((.Values.backup | default dict) | dig \"store\" dict) \"s3\") | include \"set-only-if\" }}'\n"
+ " - kube-cronjob\n"
+ " enabled_conditions:\n"
+ " - '{{ .Values.backup | ne nil }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/backup\n"
+ " targetNamespace: kube-system\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " CLUSTER_NAME: '{{ .Values.cluster.name }}'\n"
+ " RUNASUSER: \"915\"\n"
+ " RUNASGROUP: \"915\"\n"
+ " _components:\n"
+ " - \"./components/backup-etcd\"\n"
+ " - '{{ tuple \"./components/backup-etcd/cabpk-volumes\" (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpk\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"./components/backup-etcd/cabpr-volumes\" (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"./components/pushgateway\" (tuple . \"prometheus-pushgateway\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"./components/timestamp\" ((.Values.backup | default dict) | dig \"timestamped\" false) | include \"set-only-if\" }}'\n"
+ " _patches:\n"
+ " - target:\n"
+ " kind: CronJob\n"
+ " patch: |\n"
+ " - op: replace\n"
+ " path: /spec/schedule\n"
+ " value: \"0 5 * * * \"\n\n"
+ " rancher-backup-restore-operator-crd:\n"
+ " info:\n"
+ " description: installs the Rancher Backup Restore operator CRDs\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"rancher-backup-restore-operator\" | include \"unit-enabled\" }}'\n"
+ " helm_repo_url: https://charts.rancher.io\n"
+ " helmrelease_spec:\n"
+ " driftDetection:\n"
+ " mode: warn\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: rancher-backup-crd\n"
+ " version: 106.0.3+up7.0.2\n"
+ " kustomization_spec:\n"
+ " prune: false\n\n"
+ " rancher-backup-restore-operator-init:\n"
+ " info:\n"
+ " description: configures the Rancher Backup Restore operator, which permits backing up and restoring Rancher\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled_conditions:\n"
+ " - '{{ tuple . \"rancher-backup-restore-operator\" | include \"unit-enabled\" }}'\n"
+ " depends_on:\n"
+ " rancher-backup-restore-operator-crd: true\n"
+ " repo: sylva-core\n"
+ " kustomization_substitute_secrets:\n"
+ " S3_ACCESS_KEY: '{{ (.Values.backup | default dict) | dig \"store\" \"s3\" \"access_key\" \"\" | b64enc }}'\n"
+ " S3_SECRET_KEY: '{{ (.Values.backup | default dict) | dig \"store\" \"s3\" \"secret_key\" \"\" | b64enc }}'\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/rancher-backup-restore-operator-config\n"
+ " _components:\n"
+ " - '{{ tuple \"./components/backup-to-s3\" (hasKey ((.Values.backup | default dict) | dig \"store\" dict) \"s3\") | include \"set-only-if\" }}'\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " SCHEDULE: \"0 5 * * *\"\n\n"
+ " rancher-backup-restore-operator:\n"
+ " info:\n"
+ " description: installs the Rancher Backup Restore operator, which permits backing up and restoring Rancher\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " depends_on:\n"
+ " rancher-backup-restore-operator-crd: true\n"
+ " rancher-backup-restore-operator-init: true\n"
+ " '{{ .Values._internal.default_storage_class_unit }}': '{{ not (hasKey ((.Values.backup | default dict) | dig \"store\" dict) \"s3\") }}'\n"
+ " helm_repo_url: https://charts.rancher.io\n"
+ " helmrelease_spec:\n"
+ " driftDetection:\n"
+ " mode: warn\n"
+ " chart:\n"
+ " spec:\n"
+ " chart: rancher-backup\n"
+ " version: 106.0.3+up7.0.2\n"
+ " targetNamespace: cattle-resources-system\n"
+ " values:\n"
+ " persistence:\n"
+ " enabled: '{{ (not (hasKey ((.Values.backup | default dict) | dig \"store\" dict) \"s3\")) | include \"preserve-type\" }}'\n"
+ " storageClass: '{{ .Values._internal.default_storage_class }}'\n"
+ " s3: >-\n"
+ " {{\n"
+ " tuple (dict\n"
+ " \"enabled\" true\n"
+ " \"credentialSecretName\" \"backup-store-s3-rancher\"\n"
+ " \"credentialSecretNamespace\" \"cattle-resources-system\"\n"
+ " \"bucketName\" ((.Values.backup | default dict) | dig \"store\" \"s3\" \"bucket\" \"\")\n"
+ " \"region\" ((.Values.backup | default dict) | dig \"store\" \"s3\" \"region\" \"\")\n"
+ " \"endpoint\" ((.Values.backup | default dict) | dig \"store\" \"s3\" \"host\" \"\")\n"
+ " \"endpointCA\" ((.Values.backup | default dict) | dig \"store\" \"s3\" \"cert\" \"\")\n"
+ " )\n"
+ " (hasKey ((.Values.backup | default dict) | dig \"store\" dict) \"s3\")\n"
+ " | include \"set-only-if\"\n"
+ " }}\n\n"
+ " firewall-common:\n"
+ " info:\n"
+ " description: >\n"
+ " configures global network policies applying\n"
+ " on the management cluster and on workload clusters\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled: false\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/firewall/common\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " # _internal.cluster_virtual_ip is defined when not using capo\n"
+ " # Otherwise, CLUSTER_VIRTUAL_IP is substituted from the capo-cluster-resources configmap when using capo\n"
+ " CLUSTER_VIRTUAL_IP: '{{ .Values._internal.cluster_virtual_ip | include \"set-if-defined\" }}'\n"
+ " ALLOWED_PUBLIC_SUBNETS_TO_GUI: '{{ .Values.security.firewall.allowed_public_subnets_for_gui | default .Values.security.firewall.allowed_public_subnets | toJson }}'\n"
+ " ALLOWED_PUBLIC_SUBNETS_TO_KUBEAPI: '{{ .Values.security.firewall.allowed_public_subnets_for_kubeapi | default .Values.security.firewall.allowed_public_subnets | toJson }}'\n"
+ " ALLOWED_PUBLIC_SUBNETS_TO_SSH: '{{ .Values.security.firewall.allowed_public_subnets_for_ssh | default .Values.security.firewall.allowed_public_subnets | toJson }}'\n"
+ " NTP_SERVERS: '{{ .Values.ntp.servers | toJson }}'\n"
+ " CLUSTER_PODS_CIDR: '{{ .Values.cluster.cluster_pods_cidrs | toJson }}'\n"
+ " COREDNS_SELECTOR: '{{ .Values._internal.coredns_selector }}'\n"
+ " substituteFrom:\n"
+ " - kind: ConfigMap\n"
+ " name: capo-cluster-resources # byproduct of capo-cluster-resources units\n"
+ " optional: '{{ not (.Values.cluster.capi_providers.infra_provider | eq \"capo\") | include \"as-bool\" }}'\n"
+ " - kind: ConfigMap\n"
+ " name: management-cluster-addresses # generated by 'create-global-network-set-for-(capo|capm3)-cluster-nodes' Kyverno ClusterPolicy\n"
+ " - kind: ConfigMap\n"
+ " name: cluster-provisioning-addresses # generated by create-global-network-set-for-capm3-cluster-nodes Kyverno ClusterPolicy\n"
+ " optional: '{{ not (and (.Values.cluster.capi_providers.infra_provider | eq \"capm3\") .Values.cluster.capm3.provisioning_pool_name) | include \"as-bool\" }}'\n"
+ " _components:\n"
+ " - '{{ tuple \"components/rke2\" (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\") | include \"set-only-if\" }}'\n\n"
+ " firewall-management-cluster:\n"
+ " info:\n"
+ " description: >\n"
+ " configures global network policies applying\n"
+ " only on the management cluster\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled: false\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/firewall/management-cluster\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " # _internal.cluster_virtual_ip is defined when not using capo\n"
+ " # Otherwise, CLUSTER_VIRTUAL_IP is substituted from the capo-cluster-resources configmap when using capo\n"
+ " CLUSTER_VIRTUAL_IP: '{{ .Values._internal.cluster_virtual_ip | include \"set-if-defined\" }}'\n"
+ " COREDNS_SELECTOR: '{{ .Values._internal.coredns_selector }}'\n"
+ " DEFAULT_POLICY: '{{ .Values.security.firewall.default_policy }}'\n"
+ " substituteFrom:\n"
+ " - kind: ConfigMap\n"
+ " name: capo-cluster-resources\n"
+ " optional: '{{ not (.Values.cluster.capi_providers.infra_provider | eq \"capo\") | include \"as-bool\" }}'\n"
+ " _components:\n"
+ " - '{{ tuple \"components/neuvector\" (tuple . \"neuvector\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/monitoring\" (tuple . \"monitoring\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/logging\" (tuple . \"logging\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/metal3\" (tuple . \"metal3\" | include \"unit-enabled\") | include \"set-only-if\" }}'\n"
+ " - '{{ tuple \"components/rke2\" (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\") | include \"set-only-if\" }}'\n\n"
+ " firewall-workload-cluster:\n"
+ " info:\n"
+ " description: >\n"
+ " configures global network policies applying\n"
+ " only on workload clusters\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled: false\n"
+ " repo: sylva-core\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/firewall/workload-cluster\n"
+ " wait: true\n"
+ " postBuild:\n"
+ " substitute:\n"
+ " # _internal.cluster_virtual_ip is defined when not using capo\n"
+ " # Otherwise, CLUSTER_VIRTUAL_IP is substituted from the capo-cluster-resources configmap when using capo\n"
+ " CLUSTER_VIRTUAL_IP: '{{ .Values._internal.cluster_virtual_ip | include \"set-if-defined\" }}'\n"
+ " DEFAULT_POLICY: '{{ .Values.security.firewall.default_policy }}'\n"
+ " substituteFrom:\n"
+ " - kind: ConfigMap\n"
+ " name: capo-cluster-resources\n"
+ " optional: '{{ not (.Values.cluster.capi_providers.infra_provider | eq \"capo\") | include \"as-bool\" }}'\n\n"
+ " # only on the management cluster with capo enabled\n"
+ " firewall-kyverno-capo:\n"
+ " info:\n"
+ " description: >\n"
+ " configures a kyverno policy to create global network sets\n"
+ " with the IP address assigned to the nodes of the Openstack workload clusters\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled: false\n"
+ " repo: sylva-core\n"
+ " depends_on:\n"
+ " capo: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/firewall/kyverno-capo\n"
+ " # if the deployment is not capo, wait for the creation of the kyverno resources\n"
+ " # else wait not only for the creation of the kyverno resources but also for\n"
+ " # the production of the configmap containing the IP addresses of the management cluster\n"
+ " wait: true\n\n"
+ " # only on the management cluster with capm3 enabled\n"
+ " firewall-kyverno-capm3:\n"
+ " info:\n"
+ " description: >\n"
+ " configures a kyverno policy to create global network sets\n"
+ " with the IP address assigned to the nodes of the baremetal workload clusters\n"
+ " internal: true\n"
+ " unit_templates:\n"
+ " - base-deps\n"
+ " enabled: false\n"
+ " repo: sylva-core\n"
+ " depends_on:\n"
+ " capm3: true\n"
+ " kustomization_spec:\n"
+ " path: ./kustomize-units/firewall/kyverno-capm3\n"
+ " # if the deployment is not capm3, wait for the creation of the kyverno resources\n"
+ " # else wait not only for the creation of the kyverno resources but also for\n"
+ " # the production of the configmap containing the IP addresses of the management cluster\n"
+ " wait: true\n\n"
+ "##### stuff related to the 'cluster' unit #####\n"
+ "#\n"
+ "# all these values under 'cluster' are passed as values to sylva-capi-cluster chart\n\n"
+ "cluster:\n"
+ " name: management-cluster\n\n"
+ " # checks whether the cluster name starts with a number; set to false to bypass this check\n"
+ " check_cluster_name: true\n\n"
+ " # can be set to true to do an RKE2 deployment disconnected from the Internet:\n"
+ " air_gapped: false\n\n"
+ " # CIS profile to be used. Currently supported only for rke2 clusters. \"cis-1.6\" for k8s prior to 1.25, \"cis-1.23\" for 1.25+\n"
+ " cis_profile: cis\n\n"
+ " # for now, the choice below needs to be made\n"
+ " # consistently with the choice of a matching kustomization path\n"
+ " # for the 'cluster' unit\n"
+ " # e.g. you can use ./management-cluster-def/rke2-capd\n"
+ " capi_providers:\n"
+ " infra_provider: capd # capd, capo, capm3, capv or capone\n"
+ " bootstrap_provider: cabpk # cabpr (RKE2) or cabpk (kubeadm)\n\n"
+ " # kubernetes version to be used\n"
+ " k8s_version: '{{ .Values._internal.k8s_version_map | dig .Values.k8s_version_short \"\" | required (printf \"no k8s version defined for %s\" .Values.k8s_version_short) }}'\n\n"
+ " # kube_vip version to be used for kubeadm deployments\n"
+ " images:\n"
+ " kube_vip:\n"
+ " repository: ghcr.io/kube-vip/kube-vip\n"
+ " tag: \"v1.0.0\"\n\n"
+ " # Nodes number for control-plane\n"
+ " control_plane_replicas: 3\n\n"
+ " kubeapiserver_extra_args:\n"
+ " feature-gates: '{{ tuple \"ValidatingAdmissionPolicy=true\" (tuple \"<1.32.0\" .Values._internal.k8s_version | include \"k8s-version-match\") | include \"set-only-if\" }}'\n"
+ " runtime-config: \"admissionregistration.k8s.io/v1beta1\"\n\n"
+ " kubelet_extra_args:\n"
+ " max-pods: '{{ .Values.cluster.capi_providers.infra_provider | eq \"capm3\" | ternary \"500\" \"210\" }}'\n\n"
+ " capo:\n"
+ " # flavor_name: m1.large # Openstack flavor name\n"
+ " # image_key: # key of an image in os_images or sylva_diskimagebuilder_images\n"
+ " # image_name: # (deprecated, please use image_key instead) - OpenStack image name (one of image_key and image_name must be set, but not both)\n"
+ " # ssh_key_name: # OpenStack VM SSH key\n"
+ " # network_id: # OpenStack network used for nodes and VIP port\n"
+ " # rootVolume: {} # Leave this parameter empty if you don't intend to use a root volume\n"
+ " # # otherwise, provide following values\n"
+ " # diskSize: 20 # Size of the VMs root disk\n"
+ " # volumeType: '__DEFAULT__' # Type of volume to be created\n"
+ " # #control_plane_az: # list of OpenStack availability zones to deploy control planes nodes to, otherwise all would be candidates\n"
+ " # clouds_yaml: # (this is a dict, not a YAML string)\n"
+ " # clouds:\n"
+ " # capo_cloud:\n"
+ " # auth:\n"
+ " # auth_url: # replace me\n"
+ " # user_domain_name: # replace me\n"
+ " # project_domain_name: # replace me\n"
+ " # project_name: # replace me\n"
+ " # username: # replace me\n"
+ " # password: # replace me\n"
+ " # region_name: # replace me\n"
+ " # verify: # e.g. false\n"
+ " # cacert: # cert used to validate CA of OpenStack APIs\n\n"
+ " # tag set for OpenStack resources in management cluster:\n"
+ " resources_tag: >-\n"
+ " {{- if .Values.cluster.capi_providers.infra_provider | eq \"capo\" -}}\n"
+ " sylva-{{ .Values.cluster.capo.clouds_yaml.clouds.capo_cloud.auth.username }}\n"
+ " {{- end -}}\n\n"
+ " control_plane:\n"
+ " capo:\n"
+ " security_group_names:\n"
+ " - capo-{{ .Values.cluster.name }}-security-group-ctrl-plane-{{ .Values.cluster.capo.resources_tag }}\n"
+ " - capo-{{ .Values.cluster.name }}-security-group-common-{{ .Values.cluster.capo.resources_tag }}\n"
+ " additional_commands: >-\n"
+ " {{- tuple (dict\n"
+ " \"pre_bootstrap_commands\" (list\n"
+ " \"groupadd -r -g 915 etcd\"\n"
+ " \"useradd -r -s /sbin/nologin -M -u 915 -g 915 etcd\"\n"
+ " )\n"
+ " )\n"
+ " (tuple . \"backup-etcd\" | include \"unit-enabled\")\n"
+ " | include \"set-only-if\"\n"
+ " }}\n\n"
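// The pre-bootstrap commands above create an `etcd` user and group with
// uid/gid 915 on control-plane nodes, matching the RUNASUSER/RUNASGROUP value
// of "915" used by the backup-etcd unit earlier, presumably so that the backup
// CronJob's container can read etcd's data directory.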
+ " machine_deployments: {}\n\n"
+ " machine_deployment_default:\n"
+ " machine_deployment_spec:\n"
+ " strategy:\n"
+ " rollingUpdate:\n"
+ " maxUnavailable: '{{ .Values.cluster.capi_providers.infra_provider | eq \"capm3\" | ternary 1 0 | include \"preserve-type\" }}'\n"
+ " # use maxSurge 0 for baremetal deployments\n"
+ " maxSurge: '{{ .Values.cluster.capi_providers.infra_provider | eq \"capm3\" | ternary 0 1 | include \"preserve-type\" }}'\n"
+ " capo:\n"
+ " security_group_names:\n"
+ " - capo-{{ .Values.cluster.name }}-security-group-workers-{{ .Values.cluster.capo.resources_tag }}\n"
+ " - capo-{{ .Values.cluster.name }}-security-group-common-{{ .Values.cluster.capo.resources_tag }}\n\n"
+ " cluster_virtual_ip: '{{ .Values.cluster_virtual_ip }}'\n\n"
+ " cluster_public_ip: '{{ tuple (.Values.openstack.floating_ip) (not (eq .Values.openstack.floating_ip \"\")) | include \"set-only-if\" }}'\n"
+ " cluster_services_cidrs:\n"
+ " - 100.73.0.0/16\n"
+ " cluster_pods_cidrs:\n"
+ " - 100.72.0.0/16\n"
+ " cni:\n"
+ " calico:\n"
+ " helm_values: '{{ .Values.calico_helm_values | include \"preserve-type\" }}'\n\n"
+ " coredns:\n"
+ " helm_values:\n"
+ " tolerations:\n"
+ " - key: \"node.cloudprovider.kubernetes.io/uninitialized\"\n"
+ " effect: \"NoSchedule\"\n"
+ " value: \"true\"\n"
+ " global:\n"
+ " clusterDNS: '{{ .Values._internal.coredns.clusterDNS }}'\n"
+ " clusterDomain: '{{ .Values._internal.coredns.clusterDomain }}'\n"
+ " serviceCIDR: '{{ .Values._internal.coredns.serviceCIDR }}'\n\n"
+ " metallb_helm_values: '{{ omit .Values.metallb_helm_values \"prometheus\" | include \"preserve-type\" }}'\n\n"
+ " ntp: '{{ .Values.ntp | include \"preserve-type\" }}'\n"
+ " proxies:\n"
+ " http_proxy: '{{ .Values.proxies.http_proxy }}'\n"
+ " https_proxy: '{{ .Values.proxies.https_proxy }}'\n"
+ " no_proxy: '{{ include \"sylva-units.no_proxy\" (tuple .) }}'\n\n"
+ " registry_mirrors: '{{ .Values.registry_mirrors | include \"preserve-type\" }}'\n\n"
+ " helm_oci_url: # for an OCI-based deployment this is overridden in use-oci-artifacts.values.yaml\n"
+ " metallb: \"\"\n"
+ " metallb-resources: \"\"\n"
+ " calico-crd: \"\"\n"
+ " calico: \"\"\n"
+ " coredns: \"\"\n"
+ " helm_versions:\n"
+ " metallb: '{{ .Values.units.metallb.helmrelease_spec.chart.spec.version }}'\n"
+ " metallb-resources: '{{ .Values.source_templates | dig \"metallb-resources\" \"spec\" \"ref\" \"tag\" \"\" }}'\n"
+ " calico-crd: '{{ tuple . \"calico-crd\" (index .Values.units \"calico-crd\") | include \"get-helm-version\" }}'\n"
+ " calico: '{{ tuple . \"calico\" .Values.units.calico | include \"get-helm-version\" }}'\n"
+ " coredns: '{{ .Values.units.coredns.helmrelease_spec.chart.spec.version }}'\n"
+ " helm_extra_ca_certs: # for an OCI-based deployment this is overridden in use-oci-artifacts.values.yaml\n"
+ " metallb: \"\"\n"
+ " metallb-resources: \"\"\n"
+ " calico: \"\"\n"
+ " coredns: \"\"\n\n"
+ " capone: {}\n"
+ " # public_network: \"\"\n"
+ " # master_template: \"\"\n"
+ " # worker_template: \"\"\n"
+ " # images: []\n"
+ " # templates: []\n"
+ " # ONE_XMLRPC: \"\"\n"
+ " # ONE_AUTH: \"\"\n\n"
+ " capv:\n"
+ " # image_name: # vSphere image name\n"
+ " # username: \"\"\n"
+ " # password: \"\"\n"
+ " # dataCenter: \"\"\n"
+ " # server: \"\"\n"
+ " # dataStore: \"\"\n"
+ " # tlsThumbprint: \"\"\n"
+ " # folder: \"\"\n"
+ " # resourcePool: \"\"\n"
+ " # storagePolicyName: \"\"\n"
+ " # networks:\n"
+ " # default:\n"
+ " # networkName: \"\"\n"
+ " # ssh_key: ''\n"
+ " numCPUs: 4\n\n"
+ " capm3:\n"
+ " machine_image_checksum_type: sha256\n"
+ " networks:\n"
+ " primary:\n"
+ " interface: \"\"\n"
+ " image_provisioning_host: '{{ .Values.display_external_ip }}'\n"
+ " image_provisioning_scheme: \"https\"\n"
+ " default_network_settings:\n"
+ " mtu: 1500 # set the default network interfaces MTU size\n\n"
+ " enable_longhorn: '{{ tuple . \"longhorn\" | include \"unit-enabled\" | include \"as-bool\" }}'\n\n"
+ " openshift:\n"
+ " baseDomain: '{{ .Values.cluster_domain }}'\n"
+ " version: 4.19.0-okd-scos.ec.1\n"
+ " sshAuthorizedKey: \"\"\n"
+ " pullSecret: \"\"\n"
+ " additionalEnabledCapabilities:\n"
+ " - marketplace\n"
+ " - NodeTuning\n"
+ " - Storage\n\n"
+ " timeouts:\n"
+ " cluster_delete_hook_job_timeout: 300\n\n"
+ " default_node_class: generic\n"
+ " node_classes: {}\n"
+ " # generic:\n"
+ " # kernel_cmdline:\n"
+ " # hugepages:\n"
+ " # enabled: false\n"
+ " # 2M_percentage_total: \"\"\n"
+ " # 1G_percentage_total: \"\"\n"
+ " # default_size: 2M\n"
+ " # extra_options: \"\"\n"
+ " # kubelet_extra_args: {}\n"
+ " # kubelet_config_file_options: {}\n"
+ " # nodeTaints: {}\n"
+ " # nodeLabels: {}\n"
+ " # nodeAnnotations: {}\n"
+ " # additional_commands:\n"
+ " # pre_bootstrap_commands: []\n"
+ " # post_bootstrap_commands: []\n\n"
+ " ### !!! The Values.cluster.metallb should not be user provided. Use Values.metallb instead !!!\n"
+ " metallb: >-\n"
+ " {{- $cluster_metallb_resources := dict }}\n"
+ " {{- tuple $cluster_metallb_resources .Values.metallb .Values._internal.metallb | include \"merge-append\" }}\n"
+ " {{ $cluster_metallb_resources | include \"preserve-type\" }}\n\n"
+ " rke2:\n"
+ " gzipUserData: '{{ .Values.cluster.capi_providers.infra_provider | eq \"capo\" | include \"preserve-type\" }}'\n\n"
+ " # annotations:\n"
+ " # cluster:\n"
+ " # cluster-maxunavailable.sylva.org/enabled: true\n\n"
+ "cluster_machines_ready:\n"
+ " wait_timeout: 600s\n\n"
+ "capd_docker_host: unix:///var/run/docker.sock\n\n"
+ "openstack:\n"
+ " #external_network_id # Can be provided if a FIP is needed in order to reach the management cluster VIP\n"
+ " floating_ip: \"\" # will typically be set by capo-cluster-resources\n"
+ " storageClass:\n"
+ " name: cinder-csi # name of the storageClass to be created\n"
+ " #type: xxx # please provide the cinder volume type, e.g. 'ceph_sas' (must exist in OpenStack)\n"
+ " control_plane_affinity_policy: soft-anti-affinity\n"
+ " worker_affinity_policy: soft-anti-affinity\n\n"
+ "oci_registry_insecure: false\n\n"
+ "metal3: {}\n"
+ " # external_bootstrap_ip:\n"
+ " # bootstrap_ip:\n\n"
+ "opennebula: {}\n\n"
+ "vsphere:\n"
+ " vsphere-cpi:\n"
+ " vsphere_conf:\n"
+ " # Global properties in this section will be used for all specified vCenters unless overriden in VirtualCenter section.\n"
+ " global:\n"
+ " port: 443\n"
+ " # set insecure-flag to true if the vCenter uses a self-signed cert\n"
+ " insecureFlag: true\n"
+ " # settings for using k8s secret\n"
+ " secretName: vsphere-cloud-secret\n"
+ " secretNamespace: kube-system\n\n"
+ " # vcenter section\n"
+ " vcenter:\n"
+ " '{{ .Values.cluster.capv.server }}':\n"
+ " server: '{{ .Values.cluster.capv.server }}'\n"
+ " datacenters:\n"
+ " - '{{ .Values.cluster.capv.dataCenter }}'\n\n"
+ "cluster_virtual_ip: ''\n\n"
+ "# Admin password that will be configured by default on various units # FIXME, only used for SSO today see https://gitlab.com/sylva-projects/sylva-core/-/issues/503\n"
+ "# [WARNING] This value cannot be overwritten on production environment (env_type: prod)\n"
+ "admin_password: '{{ .Values._internal.default_password }}'\n\n"
+ "flux:\n"
+ " kustomize:\n"
+ " concurrent: 10\n"
+ " log_level: info\n"
+ " features_gates: {}\n"
+ " helm:\n"
+ " concurrent: 10\n"
+ " features_gates:\n"
+ " OOMWatch: true\n"
+ " DetectDrift: true\n"
+ " CorrectDrift: true\n"
+ " log_level: debug\n"
+ " source:\n"
+ " concurrent: 10\n"
+ " log_level: info\n"
+ " features_gates: {}\n\n"
+ "flux_webui:\n"
+ " admin_user: admin\n\n"
+ "# pass the values for node_creation and node_drain timeout in minutes\n"
+ "sylvactl_timing_hints:\n"
+ " node_create_timeout: >-\n"
+ " {{ ternary 30 15 (eq .Values.cluster.capi_providers.infra_provider \"capm3\") | include \"preserve-type\" }}\n"
+ " node_drain_timeout: >-\n"
+ " {{ ternary 30 15 (eq .Values.cluster.capi_providers.infra_provider \"capm3\") | include \"preserve-type\" }}\n\n"
+ "display_external_ip: '{{ .Values.openstack.floating_ip | eq \"\" | ternary .Values.cluster_virtual_ip .Values.openstack.floating_ip }}'\n\n"
+ "cluster_domain: sylva\n\n"
+ "external_hostnames:\n"
+ " rancher: 'rancher.{{ .Values.cluster_domain }}'\n"
+ " vault: 'vault.{{ .Values.cluster_domain }}'\n"
+ " keycloak: 'keycloak.{{ .Values.cluster_domain }}'\n"
+ " flux: 'flux.{{ .Values.cluster_domain }}'\n"
+ " neuvector: 'neuvector.{{ .Values.cluster_domain }}'\n"
+ " harbor: 'harbor.{{ .Values.cluster_domain }}'\n"
+ " goldpinger: 'goldpinger.{{ .Values.cluster_domain }}'\n"
+ " os_image_server: ''\n"
+ " gitea: 'gitea.{{ .Values.cluster_domain }}'\n"
+ " kunai: 'kunai.{{ .Values.cluster_domain }}'\n"
+ " minio_operator_console: 'minio-operator-console.{{ .Values.cluster_domain }}'\n"
+ " minio_monitoring: 'minio-monitoring.{{ .Values.cluster_domain }}'\n"
+ " minio_monitoring_console: 'minio-monitoring-console.{{ .Values.cluster_domain }}'\n"
+ " minio_logging: 'minio-logging.{{ .Values.cluster_domain }}'\n"
+ " minio_logging_console: 'minio-logging-console.{{ .Values.cluster_domain }}'\n"
+ " thanos: 'thanos.{{ .Values.cluster_domain }}'\n"
+ " thanos_storegateway: 'thanos-storegateway.{{ .Values.cluster_domain }}'\n"
+ " thanos_receive: 'thanos-receive.{{ .Values.cluster_domain }}'\n"
+ " thanos_query: 'thanos-query.{{ .Values.cluster_domain }}'\n"
+ " thanos_bucketweb: 'thanos-bucketweb.{{ .Values.cluster_domain }}'\n"
+ " loki: 'loki.{{ .Values.cluster_domain }}'\n"
+ " grafana: 'grafana.{{ .Values.cluster_domain }}'\n"
+ " kubevirt_manager: 'kubevirt-manager.{{ .Values.cluster_domain }}'\n"
+ " # openshift_assisted_service can be overwritten by openshift.assisted.serviceHostname\n"
+ " openshift_assisted_service: 'openshift-assisted-service.{{ .Values.cluster_domain }}'\n"
+ " # openshift_assisted_images can be overwritten by openshift.assisted.imageHostname\n"
+ " openshift_assisted_images: 'openshift-assisted-images.{{ .Values.cluster_domain }}'\n\n"
+ "external_certificates:\n"
+ " rancher: {}\n"
+ " vault: {}\n"
+ " keycloak: {}\n"
+ " flux: {}\n"
+ " neuvector: {}\n"
+ " harbor: {}\n"
+ " goldpinger: {}\n"
+ " os_image_server: {}\n"
+ " gitea: {}\n"
+ " minio_operator: {}\n"
+ " minio_monitoring: {}\n"
+ " minio_logging: {}\n"
+ " thanos: {}\n"
+ " loki: {}\n"
+ " grafana: {}\n"
+ " kubevirt_manager: {}\n"
+ " kunai: {}\n\n"
+ "audit_log:\n"
+ " level: 0\n\n"
+ "keycloak: {}\n\n"
+ "# cis benchmark is only for rke2 so far, e.g. rke2-cis-1.23-profile-hardened\n"
+ "cis_benchmark_scan_profile: '{{ eq .Values.cluster.capi_providers.bootstrap_provider \"cabpr\" | ternary \"rke2-cis-1.9-profile\" \"no-scan-profile-defined-for-kubeadm-cluster\" }}'\n\n"
+ "# os_images images that should be served and from where they should be downloaded\n"
+ "# if empty default value are used\n"
+ "os_images: {}\n\n"
+ "# to configure the SR-IOV VFs on the supported NICs of cluster nodes\n"
+ "sriov:\n"
+ " node_policies: {}\n"
+ "# mypolicy1:\n"
+ "# nodeSelector: {} # <<< lets you further limit the SR-IOV capable nodes on which the VFs have to be created in a certain config; if not set it applies to all SR-IOV nodes\n"
+ "# resourceName: \"\"\n"
+ "# numVfs: \"\"\n"
+ "# deviceType: \"\" # supported values: \"netdevice\" or \"vfio-pci\"\n"
+ "# nicSelector:\n"
+ "# deviceID: \"\"\n"
+ "# vendor: \"\"\n"
+ "# pfNames: []\n"
+ "# rootDevices: []\n\n"
+ "# add ceph cluster details\n"
+ "ceph:\n"
+ " cephfs_csi:\n"
+ " clusterID: \"\"\n"
+ " fs_name: \"\"\n"
+ " adminID: \"\"\n"
+ " adminKey: \"\"\n"
+ " monitors_ips: []\n\n"
+ "# add your proxy settings if required\n"
+ "proxies:\n"
+ " https_proxy: \"\"\n"
+ " http_proxy: \"\"\n"
+ " no_proxy: \"\" # you can also use no_proxy_additional, see below\n\n"
+ "# you can disable default values for no_proxy (localhost,.svc,.cluster.local.,.cluster.local,.sylva)\n"
+ "# Ex: localhost: false\n"
+ "no_proxy_additional:\n"
+ " 10.0.0.0/8: true\n"
+ " 192.168.0.0/16: true\n"
+ " 172.16.0.0/12: true\n"
+ " localhost: true\n"
+ " 127.0.0.1: true\n"
+ " .svc: true\n"
+ " '{{ printf \".%s\" .Values.cluster_domain }}': true\n"
+ " .cluster.local.: true\n"
+ " .cluster.local: true\n\n"
+ "# configure containerd registry mirrors following https://github.com/containerd/containerd/blob/main/docs/hosts.md\n"
+ "registry_mirrors:\n"
+ " default_settings: # <<< These settings will apply to all configured mirrors\n"
+ " capabilities: [\"pull\", \"resolve\"]\n"
+ "# skip_verify: true\n"
+ "# override_path: true\n"
+ "# hosts_config:\n"
+ "# docker.io:\n"
+ "# - mirror_url: http://your.mirror/docker\n"
+ "# registry_settings: # <<< Host settings can be used to override default_settings\n"
+ "# skip_verify: false\n"
+ "# is_default_mirror: true # <<< The server configuration will be the same as host. In case of failure, upstream registry won't be used and keep the environment controlled.\n"
+ "# registry.k8s.io:\n"
+ "# - mirror_url: ...\n"
+ "# _default:\n"
+ "# - mirror_url: ...\n\n"
+ "# deploy emulated baremetal nodes in bootstrap cluster\n"
+ "libvirt_metal:\n"
+ " image: registry.gitlab.com/sylva-projects/sylva-elements/container-images/libvirt-metal:{{ .Values.source_templates | dig \"libvirt-metal\" \"spec\" \"ref\" \"tag\" \"_undefined_\"}}\n"
+ " nodes: {}\n"
+ " #management-cp:\n"
+ " # memGB: 12\n"
+ " # numCPUs: 6\n"
+ " #\n"
+ " #workload-cp:\n"
+ " # memGB: 4\n"
+ " # numCPUs: 2\n"
+ " #\n"
+ " #workload-md:\n"
+ " # memGB: 2\n"
+ " # numCPUs: 2\n\n\n"
+ "# set the type of environment between 3 possible values: dev, ci and prod\n"
+ "env_type: prod\n\n"
+ "# set NTP servers by IP or FQDN and enable their usage for control plane nodes\n"
+ "ntp:\n"
+ " enabled: false\n"
+ " servers:\n"
+ " - 1.2.3.4\n"
+ " - europe.pool.ntp.org\n\n"
+ "# These two sylva_core_oci_registry/sylva_base_oci_registry values govern which OCI repos are used\n"
+ "#\n"
+ "# This matters for:\n"
+ "# 1) OCI deployments\n"
+ "# 2) for non-OCI deployments for retrieving artifacts such as metal3 OS images\n"
+ "#\n"
+ "# For (1) sylva_base_oci_registry is automatically derived from the OCI repo used for sylva-unit HelmRelease.\n"
+ "# For (2) sylva_base_oci_registry can be customized, to use an OCI registry other than registry.gitlab.com/sylva-projects\n"
+ "#\n"
+ "# It should in general not be any need to override sylva_core_oci_registry, which is derived\n"
+ "# from sylva_base_oci_registry.\n"
+ "#\n"
+ "# sylva_base_oci_registry defaults to oci://registry.gitlab.com/sylva-projects\n"
+ "sylva_base_oci_registry:\n"
+ " '{{\n"
+ " regexReplaceAll\n"
+ " \"/sylva-core/?$\"\n"
+ " (lookup \"source.toolkit.fluxcd.io/v1\" \"HelmRepository\" .Release.Namespace \"sylva-units\" | dig \"spec\" \"url\" \"\")\n"
+ " \"\"\n"
+ " | default \"oci://registry.gitlab.com/sylva-projects\"\n"
+ " }}'\n"
+ "sylva_core_oci_registry: '{{ .Values.sylva_base_oci_registry }}/sylva-core'\n\n"
+ "os_images_oci_registries:\n"
+ " sylva:\n"
+ " url: '{{ .Values.sylva_base_oci_registry }}/sylva-elements/diskimage-builder'\n"
+ " tag: '{{ .Values.sylva_diskimagebuilder_version }}'\n"
+ " cosign_publickey: '{{ .Values.security.os_images.cosign_public_key }}'\n"
+ " enabled: true # can be set to false to disable all the images under sylva_diskimagebuilder_images that have 'os_images_oci_registry: sylva'\n"
+ " sylva-kiwi-imagebuilder:\n"
+ " url: '{{ .Values.sylva_base_oci_registry }}/sylva-elements/kiwi-imagebuilder'\n"
+ " tag: '{{ .Values.sylva_kiwi_imagebuilder_version }}'\n"
+ " cosign_publickey: '{{ .Values.security.kiwi_image_builder_cosign_public_key }}'\n\n"
+ "k8s_version_short: \"1.32\"\n\n"
+ "# Renovate Bot needs additional information to detect sylva diskimage-builder version\n"
+ "# renovate: depName=sylva-projects/sylva-elements/diskimage-builder\n"
+ "sylva_diskimagebuilder_version: 0.6.2\n\n"
+ "# renovate: depName=sylva-projects/sylva-elements/kiwi-imagebuilder\n"
+ "sylva_kiwi_imagebuilder_version: 0.3.1\n"
+ "# 'sylva_diskimagebuilder_images' determines which images will be available for CAPO and CAPM3\n"
+ "# via 'os_image_selector' or 'image_keys'\n"
+ "#\n"
+ "# by default it lists OCI artifacts found at gitlab.com/sylva-projects/sylva-elements/diskimage-builder\n"
+ "# or a mirror repo if os_images_oci_registries.sylva is overridden\n"
+ "#\n"
+ "sylva_diskimagebuilder_images:\n"
+ " ubuntu-noble-hardened-rke2-1-32:\n"
+ " os_images_oci_registry: sylva\n"
+ " ubuntu-noble-hardened-rke2-1-31:\n"
+ " os_images_oci_registry: sylva\n"
+ " ubuntu-noble-hardened-rke2-1-30:\n"
+ " os_images_oci_registry: sylva\n"
+ " ubuntu-noble-plain-rke2-1-32:\n"
+ " os_images_oci_registry: sylva\n"
+ " ubuntu-noble-plain-rke2-1-31:\n"
+ " os_images_oci_registry: sylva\n"
+ " ubuntu-noble-plain-rke2-1-30:\n"
+ " os_images_oci_registry: sylva\n"
+ " ubuntu-noble-plain-kubeadm-1-32:\n"
+ " os_images_oci_registry: sylva\n"
+ " ubuntu-noble-plain-kubeadm-1-31:\n"
+ " os_images_oci_registry: sylva\n"
+ " ubuntu-noble-plain-kubeadm-1-30:\n"
+ " os_images_oci_registry: sylva\n"
+ " opensuse-15-6-plain-rke2-1-32:\n"
+ " os_images_oci_registry: sylva\n"
+ " opensuse-15-6-plain-rke2-1-31:\n"
+ " os_images_oci_registry: sylva\n"
+ " opensuse-15-6-plain-rke2-1-30:\n"
+ " os_images_oci_registry: sylva\n"
+ " opensuse-15-6-hardened-rke2-1-32:\n"
+ " os_images_oci_registry: sylva\n"
+ " opensuse-15-6-hardened-rke2-1-31:\n"
+ " os_images_oci_registry: sylva\n"
+ " opensuse-15-6-hardened-rke2-1-30:\n"
+ " os_images_oci_registry: sylva\n"
+ " opensuse-15-6-plain-kubeadm-1-32:\n"
+ " os_images_oci_registry: sylva\n"
+ " opensuse-15-6-plain-kubeadm-1-31:\n"
+ " os_images_oci_registry: sylva\n"
+ " opensuse-15-6-plain-kubeadm-1-30:\n"
+ " os_images_oci_registry: sylva\n"
+ " ubuntu-noble-plain-ck8s-1-32:\n"
+ " os_images_oci_registry: sylva\n"
+ " ubuntu-noble-plain-ck8s-1-31:\n"
+ " os_images_oci_registry: sylva\n"
+ " leapmicro-6-0-plain-rke2-1-30:\n"
+ " os_images_oci_registry: sylva-kiwi-imagebuilder\n"
+ " leapmicro-6-0-plain-rke2-1-31:\n"
+ " os_images_oci_registry: sylva-kiwi-imagebuilder\n"
+ " leapmicro-6-0-plain-rke2-1-32:\n"
+ " os_images_oci_registry: sylva-kiwi-imagebuilder\n\n"
+ "# this dictionary can be extended to let os-image-server serve additional OS images\n"
+ "# if the mgmt cluster uses capm3, the os-image-server by default will serve the OS images used by the mgmt cluster\n"
+ "# os_image_server_additional_selectors is used to serve additional OS images for workload clusters\n"
+ "#\n"
+ "# os_image_server_additional_selectors:\n"
+ "# k28-for-my-workload-clusters:\n"
+ "# os: opensuse\n"
+ "# os-version: 15.6\n"
+ "# k8s-version: v1.28.*\n"
+ "# k8s-flavor: kubeadm\n"
+ "os_image_server_additional_selectors: {}\n\n"
+ "# this dict can be enriched at deployment time to feed additional information\n"
+ "# into the sylva-units-status ConfigMap\n"
+ "additional_sylva_units_status_info: {}\n\n"
+ "# os_images_default_download_storage_space defines the default size of volumes\n"
+ "# used by os-image-server to download each OS image OCI artifacts\n"
+ "# and uncompress the image stored inside\n"
+ "#\n"
+ "# it needs to cover the size of the OCI artifact *plus* the size of the uncompressed image\n"
+ "#\n"
+ "# this needs to be set to ~25Gi for hardened images\n"
+ "# (until https://gitlab.com/sylva-projects/sylva-elements/diskimage-builder/-/issues/57 is addressed)\n"
+ "os_images_default_download_storage_space: 30Gi\n\n"
+ "get_openstack_images_per_image_timeout_minutes: 30\n\n"
+ "security:\n"
+ " vault:\n"
+ " # specify vault path names for secrets store and kubernetes authentication ('secret' and 'kubernetes' by default)\n"
+ " paths:\n"
+ " secret: secret\n"
+ " k8s: kubernetes\n"
+ " # External Vault values (do not use for the moment)\n"
+ " # external_vault_url: https://myvault.mydomain:8200\n"
+ " # external_vault_ca: |\n"
+ " # -----BEGIN CERTIFICATE-----\n"
+ " # MIIC6jCCAdKgAwIBAgIBADANBgkqh....\n"
+ " external_x509_issuer: {}\n"
+ " # issuer_type: acme or vault\n"
+ " # vault_token: authent token for vault,\n"
+ " # server: e.g. https://vault.external-domain:8200\n"
+ " # vault_path: e.g. /pki/sylva, only when using vault issuer\n"
+ " default_cosign_public_key: | # the default public key in PEM format to be used to verify cosign signatures\n"
+ " -----BEGIN PUBLIC KEY-----\n"
+ " MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEcuuLszwy0u7y394KY6GW1BgjfaU6\n"
+ " aK7e95MV+ikZnRfR5EHtqEk1tScNhMqQJMpuFs3QH1TVBr6TIWVLx5cUtg==\n"
+ " -----END PUBLIC KEY-----\n"
+ " kiwi_image_builder_cosign_public_key: |\n"
+ " -----BEGIN PUBLIC KEY-----\n"
+ " MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf/H8Nl7IOJSwQ3fonpdoKqZWJsgt\n"
+ " +zI1cV172PHY/C/5YhVnMJkgcvOzSkdVxjAjOBoOLReYqF6O7GKRY6dOFw==\n"
+ " -----END PUBLIC KEY-----\n"
+ " upstream_images_signature:\n"
+ " verify: false # verify the signature of the upstream images\n"
+ " policy_action: Audit # action taken by kyverno when validation fails report (set Audit) or block the pod (set Enforce)\n"
+ " repository: '{{ .Values.sylva_base_oci_registry }}/sylva-core/signatures' # the repository storing the signatures\n"
+ " # the public key that kyverno must use to verify the signatures of the trusted upstream images\n"
+ " cosign_public_key: '{{ .Values.security.default_cosign_public_key }}'\n"
+ " images_list: # the list of trusted images to verify, wildcard is allowed, e.g.\n"
+ " - quay.io/keycloak/keycloak*\n"
+ " - hashicorp/vault*\n"
+ " calico_wireguard_enabled: false # disable wireguard by default\n"
+ " neuvector_scanning_enabled: false # disable Neuvector scanning capability by default\n"
+ " os_images:\n"
+ " skip_signing_check: false\n"
+ " cosign_public_key: '{{ .Values.security.default_cosign_public_key }}'\n"
+ " oci_artifacts:\n"
+ " skip_signing_check: false\n"
+ " cosign_public_key: | # the OCI public key is not the default public key used to verify sylva elements, e.g. os_images\n"
+ " -----BEGIN PUBLIC KEY-----\n"
+ " MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEN6LNycNA/OB8/dtqTPZcPDuLnxW\n"
+ " hR0Rskmno7Lx1WqBl2ylN/sfkLEAPfCkizceHiu/fw8lnsPq9uSGlAICeQ==\n"
+ " -----END PUBLIC KEY-----\n"
+ " sbom_operator:\n"
+ " targets:\n"
+ " - configmap # list of targets (i.e. places where the SBOM is uploaded). Supported targets are configmap, oci and dtrack\n"
+ " trivy_operator:\n"
+ " insecure_registries: {}\n"
+ " mirrors: {}\n\n"
+ " firewall:\n"
+ " enabled: false\n"
+ " # By default, allow external ingress traffic from all sources\n"
+ " allowed_public_subnets:\n"
+ " - 0.0.0.0/0\n\n"
+ " # Specify the following values to use different subnets for each type of flow\n"
+ " # allowed_public_subnets_for_gui:\n"
+ " # allowed_public_subnets_for_kubeapi:\n"
+ " # allowed_public_subnets_for_ssh\n\n"
+ " # We drop packets by default\n"
+ " # This value can be set to Allow for troubleshooting\n"
+ " default_policy: Allow\n"
+ " secret_manager:\n"
+ " variant: vault\n\n"
+ "monitoring:\n"
+ " platform_tag: Sylva\n\n"
+ " thanos:\n"
+ " # receive_url:\n"
+ " # - for mgmt cluster, the local thanos k8s service is used (the URL below)\n"
+ " # - for workload clusters, this is overridden via mgmt_cluster_state_values.monitoring.thanos.receive_url\n"
+ " # to point to the mgmt cluster Thanos receive ingress\n"
+ " receive_url: http://thanos-receive.thanos.svc.cluster.local:19291/api/v1/receive\n\n"
+ " receive:\n"
+ " persistence:\n"
+ " size: '{{ has .Values.env_type (list \"dev\" \"ci\") | ternary \"10Gi\" \"20Gi\" }}'\n"
+ " compactor:\n"
+ " persistence:\n"
+ " size: '{{ has .Values.env_type (list \"dev\" \"ci\") | ternary \"20Gi\" \"50Gi\" }}'\n"
+ " storegateway:\n"
+ " persistence:\n"
+ " size: '{{ has .Values.env_type (list \"dev\" \"ci\") | ternary \"10Gi\" \"15Gi\" }}'\n"
+ " ruler:\n"
+ " persistence:\n"
+ " size: '{{ has .Values.env_type (list \"dev\" \"ci\") | ternary \"2Gi\" \"4Gi\" }}'\n\n"
+ " objstoreConfig:\n"
+ " # only taken into account when monitoring.thanos.objstoreConfig.config is not empty\n"
+ " type: \"S3\"\n\n"
+ " # custom store config\n"
+ " config: {}\n"
+ " # https://github.com/thanos-io/objstore#supported-providers-clients\n"
+ " # lists all supported provides\n\n"
+ " alertmanager:\n"
+ " config: {}\n"
+ " webhooks:\n"
+ " jiralert:\n"
+ " env: {}\n"
+ " config: {}\n"
+ " snmp_notifier:\n"
+ " config:\n"
+ " # trapserver destinations\n"
+ " # address:port format\n"
+ " destinations: []\n"
+ " # SNMP version\n"
+ " version: V2c # V3\n"
+ " auth:\n"
+ " # V2c\n"
+ " community: \"public\"\n"
+ " # V3\n"
+ " auth_protocol: \"SHA\" # MD5 or SHA\n"
+ " priv_protocol: \"AES\" # DES or AES\n"
+ " username: \"snmp-user\"\n"
+ " password: \"snmp-auth-pass\"\n"
+ " priv_password: \"snmp-priv-pass\"\n"
+ " default_extra_args:\n"
+ " - --alert.severities=critical,error,warning,info\n"
+ " - --trap.user-object=4=/etc/snmp_notifier/platform_tag.tpl\n"
+ " - --trap.user-object=5=/etc/snmp_notifier/alertname.tpl\n"
+ " - --trap.user-object=6=/etc/snmp_notifier/cluster.tpl\n"
+ " - --trap.user-object=7=/etc/snmp_notifier/team.tpl\n"
+ " - --snmp.retries=1\n"
+ " - --snmp.timeout=5s\n"
+ " - --log.level=info\n"
+ " - --snmp.version={{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.version | default \"V2c\" }}\n"
+ " snmp_v3_extra_args:\n"
+ " # --no-snmp.authentication-enabled to disable authentication\n"
+ " - --snmp.authentication-enabled\n"
+ " - --snmp.authentication-protocol={{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.auth.auth_protocol }}\n"
+ " # --no-snmp.private-enabled to disable encryption\n"
+ " - --snmp.private-enabled\n"
+ " - --snmp.private-protocol={{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.auth.priv_protocol }}\n"
+ " final_extra_args: |\n"
+ " {{ if eq .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.version \"V3\" }}\n"
+ " {{ concat .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.default_extra_args .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.snmp_v3_extra_args | include \"preserve-type\" }}\n"
+ " {{ else }}\n"
+ " {{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.default_extra_args | include \"preserve-type\" }}\n"
+ " {{ end }}\n\n"
+ "#monitoring:\n"
+ "# thanos:\n"
+ "# objstoreConfig:\n"
+ "# type: \"GCS\"\n"
+ "# # https://github.com/thanos-io/objstore#gcs\n"
+ "# config:\n"
+ "# bucket: \"test\"\n"
+ "# service_account: |-\n"
+ "# {\n"
+ "# \"type\": \"service_account\",\n"
+ "# \"project_id\": \"project\",\n"
+ "# \"private_key_id\": \"abcdefghijklmnopqrstuvwxyz12345678906666\",\n"
+ "# \"private_key\": \"-----BEGIN PRIVATE KEY-----\\...\\n-----END PRIVATE KEY-----\\n\",\n"
+ "# \"client_email\": \"thanos@example.com\",\n"
+ "# \"client_id\": \"123456789012345678901\",\n"
+ "# \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n"
+ "# \"token_uri\": \"https://oauth2.googleapis.com/token\",\n"
+ "# \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n"
+ "# \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/thanos%40gitpods.iam.gserviceaccount.com\"\n"
+ "# }\n"
+ "# alertmanager:\n"
+ "# # Supported values from: https://prometheus.io/docs/alerting/latest/configuration/\n"
+ "# config:\n"
+ "# global:\n"
+ "# resolve_timeout: 15m\n"
+ "# route:\n"
+ "# group_by: ['namespace', 'alertname', 'cluster', 'severity']\n"
+ "# receiver: 'jira_receiver'\n"
+ "# routes:\n"
+ "# - matchers:\n"
+ "# - severity=~\".*\"\n"
+ "# receiver: jira_receiver\n"
+ "# continue: true\n"
+ "# - matchers:\n"
+ "# - severity=~\".*\"\n"
+ "# receiver: snmp_traps_receiver\n"
+ "# # disable grouping; send each alert individually\n"
+ "# group_by: [\"...\"]\n"
+ "# continue: true\n"
+ "# receivers:\n"
+ "# - name: 'jira_receiver'\n"
+ "# webhook_configs:\n"
+ "# - url: 'http://alertmanager-jiralert:9097/alert'\n"
+ "# send_resolved: false\n"
+ "# # Supported values from: https://github.com/prometheus-community/helm-charts/blob/main/charts/jiralert/values.yaml\n"
+ "# # Example config file: https://github.com/prometheus-community/jiralert/blob/master/examples/jiralert.yml\n"
+ "# - name: 'snmp_traps_receiver'\n"
+ "# webhook_configs:\n"
+ "# - url: 'http://alertmanager-snmp-notifier:9464/alerts'\n"
+ "# send_resolved: false\n"
+ "# webhooks:\n"
+ "# jiralert:\n"
+ "# # If missing, the proxies are those configured in `.Values.proxies`\n"
+ "# env:\n"
+ "# https_proxy: \"https://example.com:3128\"\n"
+ "# http_proxy: \"https://example.com:3128\"\n"
+ "# no_proxy: \"127.0.0.1,jira.example.com\"\n"
+ "# config:\n"
+ "# # Both `defaults` and `receivers` are required\n"
+ "# defaults:\n"
+ "# # API access fields.\n"
+ "# api_url: 'https://example.com'\n"
+ "# user: 'user'\n"
+ "# password: 'password'\n"
+ "# # Alternatively to user and password use a Personal Access Token\n"
+ "# # personal_access_token: \"Your Personal Access Token\". See https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html\n"
+ "# receivers:\n"
+ "# # At least one receiver must be defined\n"
+ "# # Must match the Alertmanager receiver name. Required.\n"
+ "# - name: jira_receiver\n"
+ "# project: Sylva\n"
+ "# snmp_notifier:\n"
+ "# config:\n"
+ "# # trapserver destinations\n"
+ "# destinations:\n"
+ "# - 1.2.3.4:162\n"
+ "# - a.b.c.d:162\n"
+ "# version: V3\n"
+ "# auth:\n"
+ "# # V3\n"
+ "# auth_protocol: \"SHA\" # MD5 or SHA\n"
+ "# priv_protocol: \"AES\" # DES or AES\n"
+ "# username: \"snmp-user\"\n"
+ "# password: \"snmp-auth-pass\"\n"
+ "# priv_password: \"snmp-priv-pass\"\n"
+ "# # update the snmp_v3_extra_args to match the snmptrap sever\n"
+ "# # should only need to be modified if disabling authentication and/or encryption\n"
+ "# snmp_v3_extra_args:\n"
+ "# # use authentication\n"
+ "# - --snmp.authentication-enabled\n"
+ "# - --snmp.authentication-protocol={{ .Values.monitoring.alertmanager.webhooks.snmp_notifier.config.auth.auth_protocol }}\n"
+ "# # but no encryption\n"
+ "# - --no-snmp.private-enabled\n\n"
+ "logging:\n"
+ " # loki_url:\n"
+ " # - for mgmt cluster, the local Loki k8s service is used\n"
+ " # - for workload clusters, this is overridden via mgmt_cluster_state_values.logging.loki_url\n"
+ " # to point to the mgmt cluster Loki\n"
+ " loki_url: http://loki-gateway.loki.svc.cluster.local\n"
+ " # the following values are mapped into sylva-logging-flows Helm unit values\n"
+ " # (see https://gitlab.com/sylva-projects/sylva-elements/helm-charts/sylva-logging-flows)\n"
+ " flows: {}\n"
+ " clusterflows: {}\n"
+ " outputs: {}\n"
+ " clusteroutputs: {}\n\n"
+ "metallb: {}\n"
+ " #l2_lbs: # add your metallb-l2 config if required\n"
+ " #address_pools:\n"
+ " # - name: my-custom-pool\n"
+ " # addresses:\n"
+ " # - 10.122.22.151/32\n"
+ " # - 10.10.10.10-10.10.10.120\n"
+ " #l2_options:\n"
+ " # advertisements:\n"
+ " # - node_selectors:\n"
+ " # - kubernetes.io/hostname: hostB # to limit the set of nodes for a given advertisement, the node selector must be set\n"
+ " # interface: eth1 # interfaces selector can also be used together with node_selectors\n"
+ " # advertised_pools:\n"
+ " # - my-custom-pool # additional IP pools to be advertised to this peer\n\n"
+ " #bgp_lbs: # add your metallb-l3 config if required\n"
+ " #l3_options:\n"
+ " # bfd_profiles:\n"
+ " # bfd-profile-bgp:\n"
+ " # receiveInterval: 1000\n"
+ " # transmitInterval: 1000\n"
+ " # detectMultiplier: 3\n"
+ " # bgp_peers:\n"
+ " # bgp-peer1:\n"
+ " # local_asn: 64511 # example only, must be updated\n"
+ " # peer_asn: 64510 # example only, must be updated\n"
+ " # peer_address: 10.122.22.129\n"
+ " # password: bgp-peer-password\n"
+ " # bfd_profile: bfd-profile-bgp\n"
+ " # node_selectors:\n"
+ " # - matchLabels:\n"
+ " # kubernetes.io/hostname: hostB\n"
+ " # advertised_pools:\n"
+ " # - lbpool # default IP pool used for kube-api and ingress exposure\n"
+ " # - my-custom-pool # additional IP pools to be advertised to this peer\n"
+ " # receive_routes: # optional, only for learning routes from BGP peers on secondary interfaces\n"
+ " # mode: all\n"
+ " # # or\n"
+ " # prefixes:\n"
+ " # - prefix: 10.10.10.10/32\n"
+ " # - prefix: 192.168.2.0/24\n"
+ " # ge: 25\n"
+ " # le: 28\n"
+ " #address_pools:\n"
+ " # my-custom-pool:\n"
+ " # addresses:\n"
+ " # - 10.122.22.151/32\n"
+ " # - 10.10.10.10-10.10.10.120\n\n"
+ "calico_helm_values:\n"
+ " tolerations:\n"
+ " - effect: NoExecute\n"
+ " key: node.kubernetes.io/not-ready\n"
+ " operator: Exists\n"
+ " - effect: NoSchedule\n"
+ " key: node.kubernetes.io/not-ready\n"
+ " operator: Exists\n"
+ " - effect: NoSchedule\n"
+ " key: node.cloudprovider.kubernetes.io/uninitialized\n"
+ " operator: Exists\n"
+ " - effect: NoSchedule\n"
+ " key: node.kubernetes.io/network-unavailable\n"
+ " operator: Exists\n"
+ " global:\n"
+ " clusterDNS: '{{ .Values._internal.calico.clusterDNS }}'\n"
+ " clusterDomain: '{{ .Values._internal.calico.clusterDomain }}'\n"
+ " serviceCIDR: '{{ .Values._internal.calico.serviceCIDR }}'\n"
+ " installation:\n"
+ " calicoNetwork:\n"
+ " bgp: Disabled\n"
+ " mtu: |\n"
+ " {{ .Values._internal.calico_mtu | include \"preserve-type\" }}\n"
+ " nodeAddressAutodetectionV4:\n"
+ " '{{\n"
+ " tuple\n"
+ " .Values._internal.default_calico_autodetection_method\n"
+ " (not .Values._internal.capo_calico_autodetection_method_use_canReach_vip)\n"
+ " | include \"set-only-if\"\n"
+ " }}'\n"
+ " ipPools:\n"
+ " - cidr: '{{ first .Values.cluster.cluster_pods_cidrs }}'\n"
+ " encapsulation: VXLAN\n"
+ " natOutgoing: Enabled\n"
+ " registry: UseDefault\n"
+ " felixConfiguration:\n"
+ " wireguardEnabled: '{{ .Values.security.calico_wireguard_enabled | include \"preserve-type\" }}'\n\n"
+ "metallb_helm_values:\n"
+ " prometheus: >-\n"
+ " {{\n"
+ " tuple (dict\n"
+ " \"rbacPrometheus\" true\n"
+ " \"serviceAccount\" \"rancher-monitoring-prometheus\"\n"
+ " \"namespace\" \"cattle-monitoring-system\"\n"
+ " \"serviceMonitor\" (dict \"enabled\" true)\n"
+ " )\n"
+ " (tuple . \"monitoring\" | include \"unit-enabled\")\n"
+ " | include \"set-only-if\"\n"
+ " }}\n\n"
+ " loadBalancerClass: '{{ .Values._internal.loadBalancerClass }}'\n"
+ " controller:\n"
+ " image:\n"
+ " repository: registry.gitlab.com/sylva-projects/sylva-elements/container-images/sandbox-registry/metallb-controller\n"
+ " tag: v0.15.2-sylva-custom\n"
+ " nodeSelector:\n"
+ " node-role.kubernetes.io/control-plane: '{{ .Values._internal.cp_node_label_value }}'\n"
+ " tolerations:\n"
+ " - key: node.cloudprovider.kubernetes.io/uninitialized\n"
+ " value: \"true\"\n"
+ " effect: NoSchedule\n"
+ " - effect: NoExecute\n"
+ " key: node-role.kubernetes.io/etcd\n"
+ " - effect: NoSchedule\n"
+ " key: node-role.kubernetes.io/master\n"
+ " - effect: NoSchedule\n"
+ " key: node-role.kubernetes.io/control-plane\n"
+ " speaker:\n"
+ " image:\n"
+ " repository: registry.gitlab.com/sylva-projects/sylva-elements/container-images/sandbox-registry/metallb-speaker\n"
+ " tag: v0.15.2-sylva-custom\n"
+ " ignoreExcludeLB: '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpk\" | include \"as-bool\" }}'\n"
+ " priorityClassName: system-cluster-critical\n"
+ " frr:\n"
+ " enabled: false\n"
+ " tolerations:\n"
+ " - key: node.cloudprovider.kubernetes.io/uninitialized\n"
+ " value: \"true\"\n"
+ " effect: NoSchedule\n"
+ " - effect: NoExecute\n"
+ " key: node-role.kubernetes.io/etcd\n"
+ " - effect: NoSchedule\n"
+ " key: node-role.kubernetes.io/master\n"
+ " - effect: NoSchedule\n"
+ " key: node-role.kubernetes.io/control-plane\n"
+ " frrk8s:\n"
+ " enabled: '{{ tuple . .Values._internal.metallb_frrk8s_required | include \"interpret-as-bool\" }}'\n\n"
+ "_internal:\n\n"
+ " state:\n"
+ " # is_upgrade reflects whether or not the currently processed release\n"
+ " # is an \"upgrade\", ie. a new release coming after a full installation\n"
+ " # of Sylva.\n"
+ " #\n"
+ " # Helm built-in .Release.IsUpgrade is not used because it reflects only whether\n"
+ " # or not we are on the first version of the Helm release, but because of how pivot\n"
+ " # is done, and because there may be failed attempts before the first that succeeds,\n"
+ " # we have cases where .Release.IsUpgrade is true while we haven't ever reached yet\n"
+ " # a full installation.\n"
+ " #\n"
+ " # The criteria we rely on is whether this release of sylva-units reached reconciliation\n"
+ " # of the sylva-units-status Kustomization, which is the last reconciliation of the\n"
+ " # dependency chain.\n"
+ " is_upgrade: >-\n"
+ " {{ gt (lookup \"kustomize.toolkit.fluxcd.io/v1\" \"Kustomization\" .Release.Namespace \"sylva-units-status\" | dig \"status\" \"inventory\" \"entries\" list | len) 0\n"
+ " | include \"preserve-type\"\n"
+ " }}\n\n"
+ " is_multus_uninstall: >-\n"
+ " {{- tuple . \"_internal.state.is_upgrade\" | include \"interpret\" -}}\n"
+ " {{- $multus_is_deployed := lookup \"v1\" \"ConfigMap\" .Release.Namespace \"sylva-units-status\" | dig \"data\" \"enabled-units\" \"{}\" | fromYaml | dig \"multus\" false -}}\n"
+ " {{- $multus_is_disabled := not (tuple . \"multus\" | include \"unit-enabled\") -}}\n"
+ " {{ and .Values._internal.state.is_upgrade $multus_is_disabled $multus_is_deployed | include \"preserve-type\" }}\n\n"
+ " k8s_version: '{{ include \"interpret-as-string\" (tuple . .Values.cluster.k8s_version) }}'\n"
+ " mgmt_cluster: false\n\n"
+ " controlplane_kind: '{{ get (dict\n"
+ " \"cabpk\" \"KubeadmControlPlane\"\n"
+ " \"cabpr\" \"RKE2ControlPlane\"\n"
+ " \"cabpoa\" \"OpenshiftAssistedControlPlane\"\n"
+ " \"cabpck\" \"CK8sControlPlane\"\n"
+ " ) .Values.cluster.capi_providers.bootstrap_provider }}'\n\n"
+ " # compute the hash value for clouds_yaml\n"
+ " clouds_yaml_hash: >-\n"
+ " {{\n"
+ " .Values.cluster | dig \"capo\" \"clouds_yaml\" \"clouds\" dict | toYaml\n"
+ " | sha256sum | trunc 8\n"
+ " }}\n\n"
+ " # 'cluster_machines_ready_unit_deps' contains all the dependencies of the 'cluster' unit, direct and indirect,\n"
+ " # except the ones set due to depending on cluster-machines-ready.\n"
+ " # (this is used by unit_templates.base-deps)\n"
+ " # cluster_machines_ready_unit_deps: # to avoid a circular references loop between interpret variables, this is computed directly in units.yaml\n\n"
+ " ha_cluster:\n\n"
+ " # immutable ha_cluster.is_ha : keep track of first install mode HA (true) or non-HA (false)\n"
+ " is_ha: >-\n"
+ " {{\n"
+ " lookup \"v1\" \"Secret\" .Release.Namespace \"sylva-units-values\"\n"
+ " | dig \"data\" \"values\" \"\" | b64dec | fromYaml | default dict\n"
+ " | dig \"_internal\" \"ha_cluster\" \"is_ha\" (ge (int .Values.cluster.control_plane_replicas) 3 | ternary true false)\n"
+ " | include \"preserve-type\"\n"
+ " }}\n\n\n"
+ " # variable just used to trigger error while requested control_plane_replicas is incompatible with ha_cluster.is_ha computed during first install\n"
+ " checkpoint: >-\n"
+ " {{- $current_config := lookup \"v1\" \"Secret\" .Release.Namespace \"sylva-units-values\" | dig \"data\" \"values\" \"\" | b64dec | fromYaml | default dict -}}\n"
+ " {{- $is_ha := dig \"_internal\" \"ha_cluster\" \"is_ha\" \"initial run\" $current_config | toString -}}\n"
+ " {{- if not (eq $is_ha \"initial run\") -}}\n"
+ " {{- if (eq $is_ha \"false\") -}}\n"
+ " {{- (ge (int .Values.cluster.control_plane_replicas) 3) | ternary \"\" \"requested control_plane_replicas={{.Values.cluster.control_plane_replicas}} : compatible with initial non HA mode\" | required (print \"requested control_plane_replicas=\" .Values.cluster.control_plane_replicas \": can't continue, first install happened in non-HA mode, this value should be 1\") -}}\n"
+ " {{- else -}}\n"
+ " {{- (lt (int .Values.cluster.control_plane_replicas) 3) | ternary \"\" \"requested control_plane_replicas={{.Values.cluster.control_plane_replicas}} : compatible with initial HA mode\" | required (print \"requested control_plane_replicas=\" .Values.cluster.control_plane_replicas \": can't continue, current mode is HA, requires a value >=3\") -}}\n"
+ " {{- end -}}\n"
+ " {{- else -}}\n"
+ " install run, requested control_plane_replicas={{.Values.cluster.control_plane_replicas}}\n"
+ " {{- end -}}\n\n"
+ " # variable used for the validation of .Values.os_images fields for baremetal deployments\n"
+ " os_images_validation: >-\n"
+ " {{- if ( gt (.Values.os_images | len) 0 ) }}\n"
+ " {{- $capm3_required := tuple . \"os-image-server\" | include \"unit-enabled\" }}\n"
+ " {{- $capo_required := .Values.cluster.capi_providers.infra_provider | eq \"capo\" }}\n"
+ " {{- range $os_image_name, $os_image_props := .Values.os_images }}\n"
+ " {{- if $capm3_required }}\n"
+ " {{- $os_image_props.sha256 | required ( print \"'sha256' must be specified for os_image '\" $os_image_name \"'\" ) }}\n"
+ " {{- dig \"image-format\" \"\" $os_image_props | required ( print \"'image-format' must be specified for os_image '\" $os_image_name \"'. It can be raw or qcow2\" ) }}\n"
+ " {{- end }}\n"
+ " {{ if $capo_required }}\n"
+ " {{- $os_image_props.md5 | required ( print \"'md5' must be specified for os_image '\" $os_image_name \"'\" ) }}\n"
+ " {{- end }}\n"
+ " {{- end }}\n"
+ " {{- end }}\n\n"
+ " # default replica number computed from the ha_cluster.is_ha value\n"
+ " default_replicas: '{{ tuple . .Values._internal.ha_cluster.is_ha 3 1 | include \"interpret-ternary\" | int | include \"preserve-type\" }}'\n\n"
+ " # value of \"node-role.kubernetes.io/control-plane\" node label that differs between kubeadm and rke2\n"
+ " cp_node_label_value: '{{ ((tuple . .Values.cluster.capi_providers.bootstrap_provider | include \"interpret-as-string\") | eq \"cabpr\") | ternary \"true\" \"\" }}'\n\n"
+ " monitoring:\n"
+ " enabled: '{{ tuple . \"monitoring\" | include \"unit-enabled\" | include \"as-bool\" }}'\n"
+ " conditionals:\n"
+ " longhorn:\n"
+ " enabled: '{{ tuple . \"longhorn\" | include \"unit-enabled\" }}'\n"
+ " flux:\n"
+ " enabled: '{{ tuple . \"flux-system\" | include \"unit-enabled\" }}'\n"
+ " minio:\n"
+ " enabled: '{{ tuple . \"minio-monitoring\" | include \"unit-enabled\" }}'\n"
+ " loki:\n"
+ " enabled: '{{ tuple . \"loki\" | include \"unit-enabled\" }}'\n"
+ " kepler:\n"
+ " enabled: '{{ tuple . \"kepler\" | include \"unit-enabled\" }}'\n"
+ " snmp:\n"
+ " enabled: '{{ tuple . \"snmp-exporter\" | include \"unit-enabled\" }}'\n"
+ " crossplane:\n"
+ " enabled: '{{ tuple . \"crossplane\" | include \"unit-enabled\" }}'\n"
+ " harbor:\n"
+ " enabled: '{{ tuple . \"harbor\" | include \"unit-enabled\" }}'\n\n"
+ " thanos:\n"
+ " store_config_internal:\n"
+ " bucket: \"thanos\"\n"
+ " endpoint: \"minio.minio-monitoring.svc.cluster.local\"\n"
+ " insecure: false\n"
+ " http_config:\n"
+ " tls_config:\n"
+ " insecure_skip_verify: true\n"
+ " insecure_skip_verify: true\n"
+ " # Use reduced reduncancy storage class (EC:1) as we are using two replicas in longhorn\n"
+ " put_user_metadata:\n"
+ " X-Amz-Storage-Class: REDUCED_REDUNDANCY\n\n"
+ " objstoreConfig:\n"
+ " type: '{{ ternary \"S3\" .Values.monitoring.thanos.objstoreConfig.type (.Values.monitoring.thanos.objstoreConfig.config | empty) }}'\n"
+ " # construct objstoreConfig for intenal MinIO storage\n"
+ " # or use user defined options\n"
+ " config: |-\n"
+ " {{ if (.Values.monitoring.thanos.objstoreConfig.config | empty) }}\n"
+ " {{ .Values._internal.thanos.store_config_internal | include \"preserve-type\" }}\n"
+ " {{ else }}\n"
+ " {{ .Values.monitoring.thanos.objstoreConfig.config | include \"preserve-type\" }}\n"
+ " {{ end }}\n\n"
+ " # thanos user and password\n"
+ " thanos_user: '{{ .Values.cluster.name }}'\n"
+ " thanos_password: '{{ include \"internalPersistentRandomPasswd\" (tuple . \"thanos_password\") }}'\n\n"
+ " # loki user and password\n"
+ " loki_user: '{{ .Values.cluster.name }}'\n"
+ " loki_password: '{{ include \"internalPersistentRandomPasswd\" (tuple . \"loki_password\") }}'\n\n"
+ " # Grafana password\n"
+ " grafana_admin_password: '{{ include \"internalPersistentRandomPasswd\" (tuple . \"grafana_admin_password\") }}'\n\n"
+ " # kubevirt admin user and password\n"
+ " kubevirt_admin_user: 'admin'\n"
+ " kubevirt_admin_password: '{{ include \"internalPersistentRandomPasswd\" (tuple . \"kubevirt_admin_password\") }}'\n\n"
+ " default_password: '{{ include \"internalPersistentRandomPasswd\" (tuple . \"default_password\") }}'\n\n"
+ " default_longhorn_storage_class: longhorn\n\n"
+ " default_storage_class: >-\n"
+ " {{- if .Values.default_storage_class -}}\n"
+ " {{ .Values.default_storage_class }}\n"
+ " {{- else -}}\n"
+ " {{/* ensure .Values._internal.storage_class_unit_map is interpreted */}}\n"
+ " {{- tuple . \"_internal.storage_class_unit_map\" | include \"interpret\" -}}\n\n"
+ " {{- $envAll := . -}}\n"
+ " {{- $enabledSCs := list -}}\n"
+ " {{- $enabledUnits := list -}}\n"
+ " {{- range $sc, $unit := .Values._internal.storage_class_unit_map -}}\n"
+ " {{- if and (ne $sc \"local-path\") (tuple $envAll $unit | include \"unit-enabled\") -}}\n"
+ " {{- $enabledSCs = append $enabledSCs $sc -}}\n"
+ " {{- $enabledUnits = (not (has $unit $enabledUnits) | ternary (append $enabledUnits $unit) $enabledUnits) -}}\n"
+ " {{- end -}}\n"
+ " {{- end -}}\n"
+ " {{- if eq (len $enabledSCs) 1 -}}\n"
+ " {{- index $enabledSCs 0 -}}\n"
+ " {{- else if and (eq (len $enabledUnits) 1) (eq (index $enabledUnits 0) \"longhorn\") -}}\n"
+ " {{ .Values._internal.default_longhorn_storage_class }}\n"
+ " {{- else if eq (len $enabledSCs) 0 -}}\n"
+ " local-path\n"
+ " {{- else -}}\n"
+ " {{- fail (printf \"Multiple storage classes are available based on the enabled units (%s), you need to specify the default one via 'default_storage_class'\" ($enabledSCs|join \", \")) -}}\n"
+ " {{- end -}}\n"
+ " {{- end -}}\n\n"
+ " # maps storage class names to the name of the unit that implements it\n"
+ " # (a storage class does not need to appear in this map if the required unit has the same name)\n"
+ " storage_class_unit_map:\n"
+ " local-path: local-path-provisioner\n"
+ " single-replica-storageclass: longhorn\n"
+ " two-replicas-storageclass: longhorn\n"
+ " longhorn: longhorn\n"
+ " vsphere-csi: vsphere-csi-driver\n"
+ " '{{ .Values.openstack.storageClass.name }}': cinder-csi\n"
+ " cephfs-csi: ceph-csi-cephfs\n"
+ " nfs-ganesha: nfs-ganesha\n\n"
+ " default_storage_class_unit: '{{ tuple . .Values._internal.default_storage_class_unit_ref | include \"interpret-as-string\" }}'\n"
+ " default_storage_class_unit_ref: >-\n"
+ " {{- tuple . \"_internal.default_storage_class\" | include \"interpret\" -}}\n"
+ " {{- tuple . \"_internal.storage_class_unit_map\" | include \"interpret\" -}}\n"
+ " {{- .Values._internal.storage_class_unit_map | dig .Values._internal.default_storage_class (printf \"storage class unit not found for %s\" .Values._internal.default_storage_class) -}}\n\n"
+ " storage_class_RWX_support:\n"
+ " - longhorn\n"
+ " - two-replicas-storageclass\n"
+ " - single-replica-storageclass\n"
+ " - cephfs-csi\n"
+ " default_storage_class_RWX_support: >-\n"
+ " {{- $default_storage_class := tuple . .Values._internal.default_storage_class | include \"interpret-as-string\" -}}\n"
+ " {{- has $default_storage_class .Values._internal.storage_class_RWX_support -}}\n"
+ " default_storage_class_access_mode_rwx: '{{ eq (tuple . .Values._internal.default_storage_class_RWX_support | include \"interpret-as-string\") \"true\" | ternary \"ReadWriteMany\" \"ReadWriteOnce\" }}'\n\n"
+ " # we could use rwx as default when available, if we find a way to automate a migration from rwo to rwx\n"
+ " # (see https://gitlab.com/sylva-projects/sylva-core/-/issues/1455)\n"
+ " # we could also implement a way to select RWX on fresh deployments\n"
+ " # harbor_storage_access_mode: '{{ tuple . .Values._internal.default_storage_class_access_mode_rwx | include \"interpret-as-string\" }}'\n"
+ " harbor_storage_access_mode: ReadWriteOnce\n\n"
+ " vault_replicas: >-\n"
+ " {{ or (.Values.cluster.capi_providers.infra_provider | eq \"capd\") (and (lt (int .Values.cluster.control_plane_replicas) 3) (eq .Values._internal.default_storage_class_unit \"local-path\")) | ternary 1 3 }}\n\n"
+ " vault_affinity:\n"
+ " podAntiAffinity:\n"
+ " requiredDuringSchedulingIgnoredDuringExecution:\n"
+ " - labelSelector:\n"
+ " matchLabels:\n"
+ " app.kubernetes.io/name: vault\n"
+ " vault_cr: vault\n"
+ " topologyKey: kubernetes.io/hostname\n\n"
+ " vault_no_affinity: {}\n\n"
+ " keycloak_affinity:\n"
+ " podAntiAffinity:\n"
+ " requiredDuringSchedulingIgnoredDuringExecution:\n"
+ " - labelSelector:\n"
+ " matchExpressions:\n"
+ " - key: app\n"
+ " operator: In\n"
+ " values:\n"
+ " - sso\n"
+ " topologyKey: kubernetes.io/hostname\n\n"
+ " # this is used in use-oci-artifacts.values.yaml\n"
+ " # and in a few other places\n"
+ " sylva_core_version: '{{ .Chart.Version }}'\n\n"
+ " # We need to retrieve bootstrap_node_ip in management-cluster while using libvirt-metal emulation, it is stored in a ConfigMap by libvirt-metal unit in that case\n"
+ " bootstrap_node_ip: '{{ lookup \"v1\" \"ConfigMap\" \"sylva-system\" \"cluster-public-endpoint\" | dig \"data\" \"address\" \"not_found\" | default \"not_found\" }}'\n\n"
+ " display_external_ip_msg: '{{ and (.Values.openstack.floating_ip | eq \"\") (.Values.cluster_virtual_ip | eq \"\") | ternary \"It must resolve to the IP used to reach the cluster\" (printf \"It must resolve to %s\" .Values.display_external_ip ) }}'\n\n"
+ " k8s_version_map:\n"
+ " \"1.30\": '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" | ternary \"v1.30.14+rke2r2\" \"v1.30.14\" }}'\n"
+ " 1.31: >-\n"
+ " {{- $version := \"v1.31.13\" -}}\n"
+ " {{- if .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" -}}\n"
+ " {{- $version = \"v1.31.13+rke2r1\" -}}\n"
+ " {{- end -}}\n"
+ " {{- if .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpck\" -}}\n"
+ " {{- $version = \"v1.31.6\" -}}\n"
+ " {{- end -}}\n"
+ " {{- $version -}}\n"
+ " 1.32: >-\n"
+ " {{- $version := \"v1.32.9\" -}}\n"
+ " {{- if .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" -}}\n"
+ " {{- $version = \"v1.32.9+rke2r1\" -}}\n"
+ " {{- end -}}\n"
+ " {{- if .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpck\" -}}\n"
+ " {{- $version = \"v1.32.5\" -}}\n"
+ " {{- end -}}\n"
+ " {{- $version -}}\n\n"
+ " #compute and validate the number of nodes\n"
+ " check_node_count_vs_bmh: >-\n"
+ " {{- if eq .Values.cluster.capi_providers.infra_provider \"capm3\" -}}\n"
+ " {{- $node_groups := dict \"control-plane\" (dict \"requested_nodes\" .Values.cluster.control_plane_replicas \"available_bmh\" 0) -}}\n"
+ " {{- range $md_name, $md_def := .Values.cluster.machine_deployments -}}\n"
+ " {{- $_ := set $node_groups $md_name (dict \"requested_nodes\" $md_def.replicas \"available_bmh\" 0) -}}\n"
+ " {{- end -}}\n"
+ " {{- $available_bmh := 0 -}}\n"
+ " {{- range $bmh_name, $bmh_def := .Values.cluster.baremetal_hosts -}}\n"
+ " {{- $bmh_def := mergeOverwrite (deepCopy $.Values.cluster.baremetal_host_default) $bmh_def -}}\n"
+ " {{- $machine_group := tuple (dict \"Values\" (deepCopy $.Values.cluster)) $bmh_name $bmh_def | include \"getMatchingValuesDefinitionForBmh\" | fromJson -}}\n"
+ " {{- if $machine_group.machineGroupName -}}\n"
+ " {{- $machine_group_name := $machine_group.machineGroupName -}}\n"
+ " {{- $available_bmh = $node_groups | dig $machine_group_name \"available_bmh\" 0 | add 1 -}}\n"
+ " {{- $node_groups = mergeOverwrite $node_groups (dict $machine_group_name (dict \"available_bmh\" $available_bmh)) -}}\n"
+ " {{- end -}}\n"
+ " {{- end -}}\n"
+ " {{- range $node_group_name, $node_group_def := $node_groups -}}\n"
+ " {{- if lt (int $node_group_def.available_bmh) (int $node_group_def.requested_nodes) -}}\n"
+ " {{- printf \"Not enough available baremetal hosts for %s: %d requested, %d available\" $node_group_name $node_group_def.requested_nodes $node_group_def.available_bmh | fail -}}\n"
+ " {{- end -}}\n"
+ " {{- end -}}\n"
+ " {{ $node_groups | include \"preserve-type\" }}\n"
+ " {{- end -}}\n\n"
+ " # total number of nodes in this cluster\n"
+ " node_count:\n"
+ " '\n"
+ " {{- $n := .Values.cluster.control_plane_replicas -}}\n"
+ " {{- tuple . \"cluster.machine_deployments\" | include \"interpret\" -}}\n"
+ " {{- range $_, $md := .Values.cluster.machine_deployments -}}\n"
+ " {{- $n = add $n $md.replicas -}}\n"
+ " {{- end -}}\n"
+ " {{ $n | include \"preserve-type\" }}\n"
+ " '\n\n"
+ "# total worker nodes in the cluster\n"
+ " worker_node_count: >-\n"
+ " {{- $n := 0 -}}\n"
+ " {{- tuple . \"cluster.machine_deployments\" | include \"interpret\" -}}\n"
+ " {{- range $_, $md := .Values.cluster.machine_deployments -}}\n"
+ " {{- $n = add $n $md.replicas -}}\n"
+ " {{- end -}}\n"
+ " {{ $n | include \"preserve-type\" }}\n\n"
+ " # upper bound to the number of longhorn nodes\n"
+ " longhorn_node_count_upper_bound: >-\n"
+ " {{- $count := 0 -}}\n"
+ " {{- if hasKey .Values.cluster \"baremetal_host_default\" -}}\n"
+ " {{- if hasKey .Values.cluster.baremetal_host_default \"longhorn_disk_config\" -}}\n"
+ " {{- $count = len .Values.cluster.baremetal_hosts -}}\n"
+ " {{- else -}}\n"
+ " {{- range $key, $value := .Values.cluster.baremetal_hosts -}}\n"
+ " {{- if hasKey $value \"longhorn_disk_config\" -}}\n"
+ " {{- $count = add $count 1 -}}\n"
+ " {{- end -}}\n"
+ " {{- end -}}\n"
+ " {{- end -}}\n"
+ " {{- else -}}\n"
+ " {{- range $key, $value := .Values.cluster.baremetal_hosts -}}\n"
+ " {{- if hasKey $value \"longhorn_disk_config\" -}}\n"
+ " {{- $count = add $count 1 -}}\n"
+ " {{- end -}}\n"
+ " {{- end -}}\n"
+ " {{- end -}}\n"
+ " {{- tuple . \"_internal.node_count\" | include \"interpret\" -}}\n"
+ " {{- $count = min $count .Values._internal.node_count -}}\n"
+ " {{- $count -}}\n\n\n"
+ " # this check will fail if a Longhorn storage class is selected as default\n"
+ " # and the cluster node/BMH settings are such that it can be determined that\n"
+ " # there aren't as many Longhorn nodes to support the number of replicas\n"
+ " # of the storage class\n"
+ " #\n"
+ " # this check can be disabled with, for instance:\n"
+ " # _internal.check_longhorn_storage_class_vs_replicas: forcefully-bypassed\n"
+ " check_longhorn_storage_class_vs_replicas: >-\n"
+ " {{- $min_nodes_for_class := (dict \"longhorn\" 3\n"
+ " \"two-replicas-storageclass\" 2) -}}\n"
+ " {{- range $sc,$min_nodes := $min_nodes_for_class -}}\n"
+ " {{- if and ($.Values.default_storage_class | eq $sc)\n"
+ " (lt ($.Values._internal.longhorn_node_count_upper_bound|int) $min_nodes) -}}\n"
+ " {{- fail (printf \"'%s' storage class is selected as default_storage_class, but less than %d Longhorn nodes are available (no more than %s BMH/Node(s) identified)\" $sc $min_nodes $.Values._internal.longhorn_node_count_upper_bound) -}}\n"
+ " {{- end -}}\n"
+ " {{- end -}}\n\n"
+ " os_image_server_images_configmap: capm3-os-image-server-os-images\n\n"
+ " default_calico_autodetection_method: >-\n"
+ " {{- if .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpk\" }}\n"
+ " {{- dict \"kubernetes\" \"NodeInternalIP\" | include \"preserve-type\" -}}\n"
+ " {{- else -}}\n"
+ " {{- dict \"canReach\" (not (empty .Values.cluster.mgmt_cluster_ip) | ternary .Values.cluster.mgmt_cluster_ip .Values.cluster_virtual_ip) | include \"preserve-type\" -}}\n"
+ " {{- end -}}\n\n"
+ " capo_calico_autodetection_method_use_canReach_vip:\n"
+ " '{{\n"
+ " and (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\")\n"
+ " (.Values.cluster.capi_providers.infra_provider | eq \"capo\")\n"
+ " (empty .Values.cluster.mgmt_cluster_ip) | include \"preserve-type\"\n"
+ " }}'\n\n"
+ " calico_readiness_unit: \"calico\"\n\n"
+ " metallb:\n"
+ " # for CAPO the cluster_virtual_ip is injected into 'metallb-resources' and 'cluster' units via capo-cluster-resources configmap\n"
+ " # so we omit it here in that case\n"
+ " cluster_virtual_ip: '{{ tuple .Values.cluster_virtual_ip (ne .Values.cluster.capi_providers.infra_provider \"capo\") | include \"set-only-if\" }}'\n"
+ " l2_lbs:\n"
+ " l2_options:\n"
+ " advertisements:\n"
+ " l2advertisement:\n"
+ " advertised_pools:\n"
+ " - lbpool # this is the pool generated by from cluster_virtual_ip\n"
+ " interfaces: >-\n"
+ " {{- if eq .Values.cluster.capi_providers.infra_provider \"capm3\" }}\n"
+ " {{- $control_plane_networks := mergeOverwrite (deepCopy .Values.cluster.capm3.networks) (deepCopy (.Values.cluster.control_plane.capm3.networks | default dict)) }}\n"
+ " {{ .Values.cluster.cluster_primary_interfaces | default (list ( $control_plane_networks.primary.interface)) | include \"preserve-type\" }}\n"
+ " {{- else -}}\n"
+ " {{- list | include \"preserve-type\" -}}\n"
+ " {{- end }}\n"
+ " node_selectors:\n"
+ " - matchLabels:\n"
+ " node-role.kubernetes.io/control-plane: '{{ .Values._internal.cp_node_label_value }}'\n\n"
+ " metallb_service_annotations: '{{ dict \"metallb.io/loadBalancerIPs\" .Values.cluster_virtual_ip \"metallb.io/allow-shared-ip\" \"cluster-external-ip\" | include \"preserve-type\" }}'\n\n"
+ " metallb_values_check: >-\n"
+ " {{- tuple . \"cluster.metallb_helm_values\" | include \"interpret\" -}}\n"
+ " {{- tuple . \"units.metallb.helmrelease_spec.values\" | include \"interpret\" -}}\n"
+ " {{- if ne (omit .Values.cluster.metallb_helm_values \"prometheus\" | toYaml) (omit .Values.units.metallb.helmrelease_spec.values \"prometheus\" | toYaml) }}\n"
+ " {{ printf \"The Helm values for Metallb are different between the cluster and the metallb unit. It is recomended to use `.Values.metallb_helm_values` to ensure consistency.\" | fail }}\n"
+ " {{- end }}\n\n"
+ " metallb_frrk8s_required: >-\n"
+ " {{- $metallb_frrk8s_required := false }}\n"
+ " {{- if (.Values.metallb | dig \"bgp_lbs\" \"l3_options\" \"bgp_peers\" \"\") }}\n"
+ " {{- range $_, $bgppeer := .Values.metallb.bgp_lbs.l3_options.bgp_peers }}\n"
+ " {{- if or $bgppeer.bfd_profile $bgppeer.receive_routes }}\n"
+ " {{- $metallb_frrk8s_required = true }}\n"
+ " {{- end }}\n"
+ " {{- end }}\n"
+ " {{- end }}\n"
+ " {{ tuple . $metallb_frrk8s_required | include \"interpret-as-bool\" }}\n\n"
+ " loadBalancerClass: '{{ tuple . (eq .Values.cluster.capi_providers.bootstrap_provider \"cabpr\") \"sylva.org/metallb-class\" \"kube-vip.io/kube-vip-class\" | include \"interpret-ternary\" }}'\n\n"
+ " kube_vip_service_annotations: '{{ dict \"kube-vip.io/loadbalancerIPs\" .Values.cluster_virtual_ip | include \"preserve-type\" }}'\n\n"
+ " lb_service_annotations: '{{ tuple . (tuple . \"metallb\" | include \"unit-enabled\") .Values._internal.metallb_service_annotations .Values._internal.kube_vip_service_annotations | include \"interpret-ternary\" | include \"preserve-type\" }}'\n\n"
+ " previous_values: '{{ omit (lookup \"v1\" \"Secret\" .Release.Namespace \"sylva-units-values\" | dig \"data\" \"values\" \"\" | b64dec | fromYaml | default dict) \"unit_templates\" | include \"preserve-type\" }}'\n\n"
+ " mgmt_cluster_default_os_selector: >-\n"
+ " {{- $selector := .Values.cluster | dig .Values.cluster.capi_providers.infra_provider \"os_image_selector\" dict | deepCopy -}}\n"
+ " {{- tuple .Values.cluster $selector | include \"finalize-os-image-selector\" | fromJson | include \"preserve-type\" -}}\n\n"
+ " immutable_values:\n"
+ " _internal:\n"
+ " default_storage_class:\n"
+ " _immutable: true\n"
+ " _immutable_comment: \"default storage cannot be updated (this error may result from enabling a new CSI unit resulting in having the automatic computation of the default storage class give a different result; if you are in this case you can simply set 'default_storage_class: <current default storage class>' in your environment values)\"\n"
+ " cluster_domain:\n"
+ " _immutable: true\n"
+ " _immutable_comment: \"The update of cluster domain is not supported\"\n"
+ " cluster:\n"
+ " name:\n"
+ " _immutable: true\n"
+ " _immutable_comment: \"Renaming a cluster is not supported\"\n"
+ " cluster_virtual_ip:\n"
+ " # The cluster virtual IP cannot be changed\n"
+ " #\n"
+ " # In the case where this IP isn't set by user but is determined dynamically (the case on CAPO today, where capo-cluster-resources allocates it)\n"
+ " # then cluster_virtual_ip will not always contain the virtual IP, and may instead be set to its default value.\n"
+ " #\n"
+ " # Since this default value has changed (today it's \"\",in the past it was \"55.55.55.55\"), the immutability check\n"
+ " # is disabled in the case where the old value is 55.55.55.55 and the new one is \"\".\n"
+ " _immutable: >-\n"
+ " {{- tuple . \"_internal.previous_values\" | include \"interpret\" -}}\n"
+ " {{ not (and (.Values._internal.previous_values.cluster_virtual_ip | eq \"55.55.55.55\")\n"
+ " (.Values.cluster_virtual_ip | eq \"\"))\n"
+ " | include \"preserve-type\"\n"
+ " }}\n"
+ " _immutable_comment: \"cluster_virtual_ip can't be changed once deployed\"\n"
+ " capi_providers:\n"
+ " bootstrap_provider:\n"
+ " _immutable: true\n"
+ " _immutable_comment: \"K8s distribution of a running cluster can't be changed\"\n"
+ " infra_provider:\n"
+ " _immutable: true\n"
+ " _immutable_comment: \"Underlying infrastructure of a running cluster can't be changed\"\n"
+ " _immutable: '{{ not .Values._internal.ha_cluster.is_ha | include \"preserve-type\" }}'\n"
+ " _immutable_comment: \"Cluster values can't be changed, as they would trigger a rolling upgrade that is not supported in non-HA mode\"\n\n"
+ " longhorn:\n"
+ " helmrelease_spec:\n"
+ " values:\n"
+ " persistence:\n"
+ " defaultClassReplicaCount:\n"
+ " _immutable: true\n"
+ " _immutable_comment: \"Default longhorn class replica count should not be changed as it impact longhorn upgrade\"\n\n"
+ " pdb_allow_unhealthy_pod_eviction:\n"
+ " target:\n"
+ " kind: PodDisruptionBudget\n"
+ " patch: |-\n"
+ " apiVersion: policy/v1\n"
+ " kind: PodDisruptionBudget\n"
+ " metadata:\n"
+ " name: not-used\n"
+ " spec:\n"
+ " unhealthyPodEvictionPolicy: AlwaysAllow\n\n"
+ " calico:\n"
+ " # for clusterDNS, take the .10 of the first subnet of cluster.cluster_services_cidrs\n"
+ " # FIXME: this will not work for subnets not starting at 0 (e.g. 192.168.1.128/25)\n"
+ " clusterDNS: '{{ regexReplaceAll \"[.]0/.*\" (first .Values.cluster.cluster_services_cidrs) \".10\" }}'\n"
+ " clusterDomain: cluster.local\n"
+ " serviceCIDR: '{{ first .Values.cluster.cluster_services_cidrs }}'\n\n"
+ " base_cni_mtu_default: 1450\n"
+ " base_cni_mtu_per_infra:\n"
+ " capm3: |-\n"
+ " {{- tuple . \"cluster.capm3.default_network_settings\" | include \"interpret\" -}}\n"
+ " {{- $base_cni_mtu := .Values.cluster.capm3.default_network_settings.mtu -}}\n\n"
+ " {{- $control_plane_primary_interface := .Values.cluster | dig \"control_plane\" \"capm3\" \"networks\" \"primary\" \"interface\" .Values.cluster.capm3.networks.primary.interface -}}\n"
+ " {{- if not (eq $control_plane_primary_interface \"\") -}}\n"
+ " {{- $base_cni_mtu = tuple (dict \"Values\" (dict \"capm3\" .Values.cluster.capm3))\n"
+ " $control_plane_primary_interface\n"
+ " (.Values.cluster.control_plane.network_interfaces | dig $control_plane_primary_interface dict)\n"
+ " .Values.cluster.capm3.networks\n"
+ " .Values.cluster.control_plane.network_interfaces | include \"calculate-mtu\" | int -}}\n"
+ " {{- end -}}\n"
+ " {{- $base_cni_mtu | include \"preserve-type\" -}}\n"
+ " base_cni_mtu: >-\n"
+ " {{\n"
+ " .Values._internal.base_cni_mtu_per_infra\n"
+ " | dig .Values.cluster.capi_providers.infra_provider .Values._internal.base_cni_mtu_default\n"
+ " | include \"preserve-type\"\n"
+ " }}\n\n"
+ " calico_encapsulation_overhead: |-\n"
+ " {{- tuple . \"_internal.base_cni_mtu\" | include \"interpret\" -}}\n\n"
+ " {{- $encapsulation_header := dict\n"
+ " \"ipip\" 20\n"
+ " \"ipipcrosssubnet\" 20\n"
+ " \"vxlan\" 50\n"
+ " \"vxlancrosssubnet\" 50\n"
+ " \"wireguard\" 60\n"
+ " \"none\" 0\n"
+ " -}}\n"
+ " {{- $max_overhead := 0 -}}\n\n"
+ " {{- /* Iterate through all ipPools to find maximum encapsulation overhead */ -}}\n"
+ " {{- range .Values.calico_helm_values.installation.calicoNetwork.ipPools -}}\n"
+ " {{- if .encapsulation -}}\n"
+ " {{- $current_encapsulation := .encapsulation | lower -}}\n"
+ " {{- if hasKey $encapsulation_header $current_encapsulation -}}\n"
+ " {{- $max_overhead = max $max_overhead (index $encapsulation_header $current_encapsulation) -}}\n"
+ " {{- else -}}\n"
+ " {{- fail (printf \"Unknown encapsulation type '%s' in calico_helm_values.installation.calicoNetwork.ipPools. Supported types are: %s\" $current_encapsulation ((omit $encapsulation_header \"wireguard\") | keys | join \", \")) -}}\n"
+ " {{- end -}}\n"
+ " {{- end -}}\n"
+ " {{- end -}}\n\n"
+ " {{- /* Check for wireguard if enabled */ -}}\n"
+ " {{- if .Values.security.calico_wireguard_enabled | default false -}}\n"
+ " {{- $max_overhead = max $max_overhead (index $encapsulation_header \"wireguard\") -}}\n"
+ " {{- end -}}\n"
+ " {{- $max_overhead | include \"preserve-type\" -}}\n\n"
+ " calico_mtu: |-\n"
+ " {{- /* Calculate final MTU */ -}}\n"
+ " {{- $mtu := sub .Values._internal.base_cni_mtu .Values._internal.calico_encapsulation_overhead -}}\n"
+ " {{- $mtu | include \"preserve-type\" -}}\n\n"
+ " # Substitute with the cluster_virtual_ip value if it is defined to another value than the default, mandatory with capm3\n"
+ " # Otherwise, the value is substituted from the capo-cluster-resources when using capo\n"
+ " cluster_virtual_ip: '{{ tuple .Values.cluster_virtual_ip (ne .Values.cluster.capi_providers.infra_provider \"capo\") | include \"set-only-if\" }}'\n\n"
+ " coredns:\n"
+ " # for clusterDNS, take the .10 of the first subnet of cluster.cluster_services_cidrs\n"
+ " # FIXME: this will not work for subnets not starting at 0 (e.g. 192.168.1.128/25)\n"
+ " clusterDNS: '{{ regexReplaceAll \"[.]0/.*\" (first .Values.cluster.cluster_services_cidrs) \".10\" }}'\n"
+ " clusterDomain: cluster.local\n"
+ " serviceCIDR: '{{ first .Values.cluster.cluster_services_cidrs }}'\n\n"
+ " # The labels of coredns differ between rke2 and kubeadm\n"
+ " rke2_coredns_selector: app.kubernetes.io/instance == 'rke2-coredns'\n"
+ " kubeadm_coredns_selector: k8s-app == 'kube-dns'\n"
+ " coredns_selector: '{{ (.Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\") | ternary .Values._internal.rke2_coredns_selector .Values._internal.kubeadm_coredns_selector }}'\n\n"
+ " # renovate: datasource=docker\n"
+ " oci_tools_image: registry.gitlab.com/sylva-projects/sylva-elements/container-images/oci-tools:0.3.1\n\n"
+ " # renovate: datasource=docker\n"
+ " keytool_image: registry.gitlab.com/sylva-projects/sylva-elements/container-images/keytool-image:0.2.1\n\n"
+ " node_collector_volumes_default:\n"
+ " - name: var-lib-etcd\n"
+ " hostPath:\n"
+ " path: /var/lib/etcd\n"
+ " - name: var-lib-kubelet\n"
+ " hostPath:\n"
+ " path: /var/lib/kubelet\n"
+ " - name: var-lib-kube-scheduler\n"
+ " hostPath:\n"
+ " path: /var/lib/kube-scheduler\n"
+ " - name: var-lib-kube-controller-manager\n"
+ " hostPath:\n"
+ " path: /var/lib/kube-controller-manager\n"
+ " - name: etc-systemd\n"
+ " hostPath:\n"
+ " path: /etc/systemd\n"
+ " - name: lib-systemd\n"
+ " hostPath:\n"
+ " path: /lib/systemd\n"
+ " - name: etc-kubernetes\n"
+ " hostPath:\n"
+ " path: /etc/kubernetes\n"
+ " - name: etc-cni-netd\n"
+ " hostPath:\n"
+ " path: /etc/cni/net.d/\n\n"
+ " node_collector_volumes: '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" | ternary (append .Values._internal.node_collector_volumes_default (dict \"name\" \"var-lib-rancher-rke2\" \"hostPath\" (dict \"path\" \"/var/lib/rancher/rke2\"))) .Values._internal.node_collector_volumes_default | include \"preserve-type\" }}'\n\n"
+ " node_collector_volume_mounts_default:\n"
+ " - name: var-lib-etcd\n"
+ " mountPath: /var/lib/etcd\n"
+ " readOnly: true\n"
+ " - name: var-lib-kubelet\n"
+ " mountPath: /var/lib/kubelet\n"
+ " readOnly: true\n"
+ " - name: var-lib-kube-scheduler\n"
+ " mountPath: /var/lib/kube-scheduler\n"
+ " readOnly: true\n"
+ " - name: var-lib-kube-controller-manager\n"
+ " mountPath: /var/lib/kube-controller-manager\n"
+ " readOnly: true\n"
+ " - name: etc-systemd\n"
+ " mountPath: /etc/systemd\n"
+ " readOnly: true\n"
+ " - name: lib-systemd\n"
+ " mountPath: /lib/systemd/\n"
+ " readOnly: true\n"
+ " - name: etc-kubernetes\n"
+ " mountPath: /etc/kubernetes\n"
+ " readOnly: true\n"
+ " - name: etc-cni-netd\n"
+ " mountPath: /etc/cni/net.d/\n"
+ " readOnly: true\n\n"
+ " node_collector_volume_mounts: '{{ .Values.cluster.capi_providers.bootstrap_provider | eq \"cabpr\" | ternary (append .Values._internal.node_collector_volume_mounts_default (dict \"name\" \"var-lib-rancher-rke2\" \"mountPath\" \"/var/lib/rancher/rke2\" \"readOnly\" true)) .Values._internal.node_collector_volume_mounts_default | include \"preserve-type\" }}'\n\n"
+ "mgmt_cluster_state_values:\n"
+ " cluster:\n"
+ " mgmt_cluster_ip: '{{ .Values.display_external_ip }}'\n"
+ " capm3:\n"
+ " image_provisioning_host: '{{ .Values.display_external_ip }}'\n"
+ " image_provisioning_scheme: '{{ .Values.cluster.capm3.image_provisioning_scheme }}'\n"
+ " proxies:\n"
+ " http_proxy: '{{ .Values.proxies.http_proxy }}'\n"
+ " https_proxy: '{{ .Values.proxies.https_proxy }}'\n"
+ " no_proxy: '{{ include \"sylva-units.no_proxy\" (tuple .) }}'\n\n"
+ " units:\n"
+ " cluster-import-init:\n"
+ " enabled: '{{ tuple . \"rancher\" | include \"unit-enabled\" }}'\n"
+ " cluster-import:\n"
+ " enabled: '{{ tuple . \"rancher\" | include \"unit-enabled\" }}'\n"
+ " cluster-import-check:\n"
+ " enabled: '{{ tuple . \"rancher\" | include \"unit-enabled\" }}'\n\n"
+ " monitoring:\n"
+ " thanos:\n"
+ " receive_url: \"https://{{ .Values.external_hostnames.thanos_receive }}/api/v1/receive\"\n"
+ " logging:\n"
+ " loki_url: \"https://{{ .Values.external_hostnames.loki }}\"\n\n"
+ " _internal:\n"
+ " workload_clusters_cluster_subdomain: wclusters.{{ .Values.cluster_domain }} # workload clusters cluster_domain will all be under wclusters.<cluster_domain>\n"
+ " sylva_mgmt_enabled_units: >-\n"
+ " {{- $units := dict -}}\n"
+ " {{- range $unit_name, $unit_def := .Values.units -}}\n"
+ " {{- if include \"unit-enabled\" (tuple $ $unit_name) -}}\n"
+ " {{- $_ := set $units $unit_name true -}}\n"
+ " {{- end -}}\n"
+ " {{- end -}}\n"
+ " {{- $units | include \"preserve-type\" -}}\n"
+ " mgmt_cluster_service_names: >-\n"
+ " {{- $mgmt_services := list -}}\n"
+ " {{- range $name, $hostname := .Values.external_hostnames }}\n"
+ " {{- if has $name (list\n"
+ " \"loki\"\n"
+ " \"rancher\"\n"
+ " \"thanos_receive\"\n"
+ " \"harbor\"\n"
+ " \"keycloak\"\n"
+ " ) -}}\n"
+ " {{- $mgmt_services = append $mgmt_services $hostname -}}\n"
+ " {{- end -}}\n"
+ " {{- end -}}\n"
+ " {{- $mgmt_services | include \"preserve-type\" -}}\n\n"
+ "workload_clusters:\n"
+ " teams: {}\n\n"
+ "snmp:\n"
+ " devices: []\n"
+ " auth: {}\n\n"
+ "rancher:\n"
+ " auth_user_info_max_age_seconds: \"0\"\n"
+ " auth_user_info_resync_cron: \"*/4 * * * *\"\n\n"
+ "# Sample snmp configuration as it needs to be added in the secrets.yaml file\n"
+ "# snmp:\n"
+ "# devices:\n"
+ "# - alias: Server1\n"
+ "# target: 1.2.3.4\n"
+ "# module: dell_idrac\n"
+ "# auth: snmpv3\n"
+ "# cluster_name: 'Client1'\n"
+ "# - alias: Server2\n"
+ "# target: 2.3.4.5\n"
+ "# module: hp_cpq\n"
+ "# auth: snmpv2\n"
+ "# cluster_name: 'Client2'\n"
+ "# auth:\n"
+ "# snmpv3:\n"
+ "# version: 3\n"
+ "# community: public\n"
+ "# security_level: authPriv\n"
+ "# username: snmp\n"
+ "# password: xxxxx\n"
+ "# auth_protocol: SHA256\n"
+ "# priv_protocol: AES\n"
+ "# priv_password: xxxxx\n"
+ "# snmpv2:\n"
+ "# version: 2\n"
+ "# community: public\n\n"
+ "openshift:\n"
+ " assisted:\n"
+ " dbSize: 8Gi\n"
+ " fsSize: 8Gi\n"
+ " imagestoreSize: 10Gi\n"
+ " osImages:\n"
+ " - openshiftVersion: \"4.17.0\"\n"
+ " cpuArchitecture: \"x86_64\"\n"
+ " url: \"https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.17/4.17.0/rhcos-4.17.0-x86_64-live.x86_64.iso\"\n"
+ " version: \"4.17.0\"\n"
+ " - openshiftVersion: \"4.19\"\n"
+ " cpuArchitecture: \"x86_64\"\n"
+ " url: \"https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.19/4.19.0/rhcos-live-iso.x86_64.iso\"\n"
+ " version: \"4.19.0\"\n\n"
+ " # variable to specify the openshift assisted service hostname\n"
+ " # serviceHostname: openshift-assisted-service.example.com\n"
+ " # variable to specify the openshift assisted image service hostname\n"
+ " # imageHostname: openshift-assisted-images.example.com\n\n"
+ "# backup:\n"
+ "# timestamped: false # if true, a timestamp YYYYMMDDHHMM is added to the name of the backup files before uploading them to the target bucket. If false, a new backup will overwrite a previous uploaded one.\n"
+ "# store:\n"
+ "# s3:\n"
+ "# host: \"\"\n"
+ "# access_key: \"\"\n"
+ "# secret_key: \"\"\n"
+ "# bucket: \"\"\n"
+ "# cert: \"\"\n";
final Pattern pattern = Pattern.compile(regex, Pattern.MULTILINE);
final Matcher matcher = pattern.matcher(string);
// Iterate over every non-overlapping match in the input
while (matcher.find()) {
// group(0) is the entire matched region
System.out.println("Full match: " + matcher.group(0));
// Capturing groups are numbered from 1, left to right
for (int i = 1; i <= matcher.groupCount(); i++) {
System.out.println("Group " + i + ": " + matcher.group(i));
}
}
}
}
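Since the pattern in this sample declares named capture groups, Matcher can also read captures by name via group(String) instead of by index. Below is a minimal, self-contained sketch of that variant; the pattern and input here are simplified placeholders (reusing two of the group names from the sample above), so adapt them to your own pattern. Requesting a group name the pattern does not declare throws an IllegalArgumentException.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class NamedGroupExample {
public static void main(String[] args) {
// Simplified, illustrative pattern: captures a repository name and an
// optionally quoted tag using named groups instead of numeric indices.
final Pattern pattern = Pattern.compile(
"repository:\\s+(?<depName>\\S+)\\n\\s+tag:\\s+\"?(?<currentValue>[^\"\\n]+)\"?");
final String input = "repository: acme/app\n  tag: \"1.2.3\"\n";
final Matcher matcher = pattern.matcher(input);
while (matcher.find()) {
// group(String) resolves a capture by its declared name
System.out.println("repository: " + matcher.group("depName"));
System.out.println("tag: " + matcher.group("currentValue"));
}
}
}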
Please keep in mind that these code samples are automatically generated and are not guaranteed to work. For a full regex reference for Java, see: https://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html