From 3af7e034fc7a2b95fe108bd227e3f7dc39e53773 Mon Sep 17 00:00:00 2001
From: havelight-ee
Date: Fri, 9 Dec 2022 13:38:44 +0900
Subject: [PATCH] ansible role update

---
ansible/roles/helm_install/defaults/main.yml | 5 + .../roles/helm_install/files/druid/Chart.lock | 12 + .../roles/helm_install/files/druid/Chart.yaml | 41 + .../roles/helm_install/files/druid/README.md | 212 +++ .../druid/charts-archive/mysql-1.6.4.tgz | Bin 0 -> 11121 bytes .../druid/charts-archive/postgresql-8.6.4.tgz | Bin 0 -> 32017 bytes .../druid/charts-archive/zookeeper-2.1.4.tgz | Bin 0 -> 10795 bytes .../files/druid/charts/mysql/.helmignore | 2 + .../files/druid/charts/mysql/Chart.yaml | 21 + .../files/druid/charts/mysql/README.md | 242 +++ .../druid/charts/mysql/templates/NOTES.txt | 43 + .../druid/charts/mysql/templates/_helpers.tpl | 43 + .../configurationFiles-configmap.yaml | 12 + .../charts/mysql/templates/deployment.yaml | 252 +++ .../initializationFiles-configmap.yaml | 12 + .../druid/charts/mysql/templates/pvc.yaml | 29 + .../druid/charts/mysql/templates/secrets.yaml | 51 + .../mysql/templates/serviceaccount.yaml | 11 + .../mysql/templates/servicemonitor.yaml | 26 + .../druid/charts/mysql/templates/svc.yaml | 36 + .../mysql/templates/tests/test-configmap.yaml | 23 + .../charts/mysql/templates/tests/test.yaml | 54 + .../files/druid/charts/mysql/values.yaml | 231 +++ .../files/druid/charts/postgresql/.helmignore | 2 + .../files/druid/charts/postgresql/Chart.yaml | 19 + .../files/druid/charts/postgresql/README.md | 587 +++++++ .../charts/postgresql/ci/default-values.yaml | 1 + .../ci/shmvolume-disabled-values.yaml | 2 + .../druid/charts/postgresql/files/README.md | 1 + .../charts/postgresql/files/conf.d/README.md | 4 + .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/postgresql/templates/NOTES.txt | 81 + .../charts/postgresql/templates/_helpers.tpl | 420 +++++ .../postgresql/templates/configmap.yaml | 26 + .../templates/extended-config-configmap.yaml | 21 + .../templates/initialization-configmap.yaml | 24 + .../templates/metrics-configmap.yaml | 13 + .../postgresql/templates/metrics-svc.yaml | 26 + .../postgresql/templates/networkpolicy.yaml | 38 + .../postgresql/templates/prometheusrule.yaml | 23 + .../charts/postgresql/templates/secrets.yaml | 23 + .../postgresql/templates/serviceaccount.yaml | 11 + .../postgresql/templates/servicemonitor.yaml | 33 + .../templates/statefulset-slaves.yaml | 299 ++++ .../postgresql/templates/statefulset.yaml | 458 +++++ .../postgresql/templates/svc-headless.yaml | 19 + .../charts/postgresql/templates/svc-read.yaml | 31 + .../charts/postgresql/templates/svc.yaml | 38 + .../charts/postgresql/values-production.yaml | 520 ++++++ .../charts/postgresql/values.schema.json | 103 ++ .../files/druid/charts/postgresql/values.yaml | 526 ++++++ .../files/druid/charts/zookeeper/.helmignore | 21 + .../files/druid/charts/zookeeper/Chart.yaml | 17 + .../files/druid/charts/zookeeper/OWNERS | 6 + .../files/druid/charts/zookeeper/README.md | 145 ++ .../charts/zookeeper/templates/NOTES.txt | 7 + .../charts/zookeeper/templates/_helpers.tpl | 46 + .../templates/config-jmx-exporter.yaml | 19 + .../zookeeper/templates/config-script.yaml | 110 ++ .../zookeeper/templates/job-chroots.yaml | 65 + .../templates/poddisruptionbudget.yaml | 17 + .../zookeeper/templates/service-headless.yaml | 28 + .../charts/zookeeper/templates/service.yaml | 41 + .../zookeeper/templates/servicemonitors.yaml | 56 + .../zookeeper/templates/statefulset.yaml | 226 +++ .../files/druid/charts/zookeeper/values.yaml | 300 ++++
.../helm_install/files/druid/install.txt | 3 + .../files/druid/override-values.yaml | 225 +++ .../files/druid/override-values.yaml_221206 | 175 ++ .../files/druid/override-values.yaml_221207 | 194 +++ .../files/druid/override-values.yaml_old | 171 ++ .../files/druid/templates/NOTES.txt | 38 + .../files/druid/templates/_helpers.tpl | 100 ++ .../druid/templates/broker/deployment.yaml | 99 ++ .../files/druid/templates/broker/ingress.yaml | 58 + .../files/druid/templates/broker/service.yaml | 48 + .../files/druid/templates/configmap.yaml | 52 + .../templates/coordinator/deployment.yaml | 110 ++ .../druid/templates/coordinator/ingress.yaml | 58 + .../druid/templates/coordinator/service.yaml | 48 + .../druid/templates/historical/ingress.yaml | 58 + .../files/druid/templates/historical/pdb.yaml | 43 + .../druid/templates/historical/service.yaml | 48 + .../templates/historical/statefulset.yaml | 164 ++ .../druid/templates/middleManager/hpa.yaml | 40 + .../templates/middleManager/ingress.yaml | 58 + .../druid/templates/middleManager/pdb.yaml | 43 + .../templates/middleManager/service.yaml | 48 + .../templates/middleManager/statefulset.yaml | 164 ++ .../druid/templates/overlord/deployment.yaml | 105 ++ .../druid/templates/overlord/ingress.yaml | 58 + .../druid/templates/overlord/service.yaml | 42 + .../druid/templates/router/deployment.yaml | 99 ++ .../files/druid/templates/router/ingress.yaml | 58 + .../files/druid/templates/router/service.yaml | 49 + .../files/druid/templates/secrets.yaml | 28 + .../helm_install/files/druid/values.yaml | 419 +++++ .../files/elasticsearch/.helmignore | 2 + .../files/elasticsearch/Chart.yaml | 12 + .../helm_install/files/elasticsearch/Makefile | 1 + .../files/elasticsearch/README.md | 465 +++++ .../elasticsearch/examples/config/Makefile | 21 + .../elasticsearch/examples/config/README.md | 27 + .../examples/config/test/goss.yaml | 31 + .../elasticsearch/examples/config/values.yaml | 29 + .../examples/config/watcher_encryption_key | 1 + .../elasticsearch/examples/default/Makefile | 14 + .../elasticsearch/examples/default/README.md | 25 + .../examples/default/rolling_upgrade.sh | 19 + .../examples/default/test/goss.yaml | 44 + .../examples/docker-for-mac/Makefile | 13 + .../examples/docker-for-mac/README.md | 23 + .../examples/docker-for-mac/values.yaml | 23 + .../examples/kubernetes-kind/Makefile | 17 + .../examples/kubernetes-kind/README.md | 36 + .../kubernetes-kind/values-local-path.yaml | 23 + .../examples/kubernetes-kind/values.yaml | 23 + .../elasticsearch/examples/microk8s/Makefile | 13 + .../elasticsearch/examples/microk8s/README.md | 32 + .../examples/microk8s/values.yaml | 32 + .../elasticsearch/examples/migration/Makefile | 10 + .../examples/migration/README.md | 167 ++ .../examples/migration/client.yaml | 19 + .../examples/migration/data.yaml | 14 + .../examples/migration/master.yaml | 23 + .../elasticsearch/examples/minikube/Makefile | 13 + .../elasticsearch/examples/minikube/README.md | 38 + .../examples/minikube/values.yaml | 23 + .../elasticsearch/examples/multi/Makefile | 19 + .../elasticsearch/examples/multi/README.md | 29 + .../elasticsearch/examples/multi/client.yaml | 50 + .../elasticsearch/examples/multi/data.yaml | 48 + .../elasticsearch/examples/multi/master.yaml | 6 + .../examples/multi/test/goss.yaml | 12 + .../examples/networkpolicy/Makefile | 14 + .../examples/networkpolicy/values.yaml | 37 + .../elasticsearch/examples/openshift/Makefile | 13 + .../examples/openshift/README.md | 24 + .../examples/openshift/test/goss.yaml | 20 +
.../examples/openshift/values.yaml | 11 + .../elasticsearch/examples/security/Makefile | 36 + .../elasticsearch/examples/security/README.md | 29 + .../examples/security/test/goss.yaml | 44 + .../examples/security/values.yaml | 28 + .../elasticsearch/examples/upgrade/Makefile | 19 + .../elasticsearch/examples/upgrade/README.md | 17 + .../examples/upgrade/test/goss.yaml | 22 + .../examples/upgrade/values.yaml | 6 + .../files/elasticsearch/override-values.yaml | 26 + .../files/elasticsearch/schema/es-ddl.sh | 310 ++++ .../files/elasticsearch/templates/NOTES.txt | 8 + .../elasticsearch/templates/_helpers.tpl | 97 ++ .../elasticsearch/templates/configmap.yaml | 34 + .../elasticsearch/templates/ingress.yaml | 64 + .../templates/networkpolicy.yaml | 61 + .../templates/poddisruptionbudget.yaml | 15 + .../templates/podsecuritypolicy.yaml | 18 + .../files/elasticsearch/templates/role.yaml | 25 + .../elasticsearch/templates/rolebinding.yaml | 20 + .../elasticsearch/templates/secret-cert.yaml | 14 + .../files/elasticsearch/templates/secret.yaml | 23 + .../elasticsearch/templates/service.yaml | 78 + .../templates/serviceaccount.yaml | 16 + .../elasticsearch/templates/statefulset.yaml | 427 +++++ .../test/test-elasticsearch-health.yaml | 50 + .../elasticsearch/tests/elasticsearch_test.py | 1504 ++++++++++++++++ .../files/elasticsearch/values.yaml | 356 ++++ .../files/ingress-nginx/.helmignore | 22 + .../files/ingress-nginx/CHANGELOG.md | 445 +++++ .../files/ingress-nginx/Chart.yaml | 23 + .../helm_install/files/ingress-nginx/OWNERS | 10 + .../files/ingress-nginx/README.md | 494 ++++++ .../files/ingress-nginx/README.md.gotmpl | 235 +++ .../controller-custom-ingressclass-flags.yaml | 7 + .../ci/daemonset-customconfig-values.yaml | 14 + .../ci/daemonset-customnodeport-values.yaml | 22 + .../ci/daemonset-extra-modules.yaml | 10 + .../ci/daemonset-headers-values.yaml | 14 + .../ci/daemonset-internal-lb-values.yaml | 14 + .../ci/daemonset-nodeport-values.yaml | 10 + .../ci/daemonset-podannotations-values.yaml | 17 + ...set-tcp-udp-configMapNamespace-values.yaml | 20 + ...emonset-tcp-udp-portNamePrefix-values.yaml | 18 + .../ci/daemonset-tcp-udp-values.yaml | 16 + .../ci/daemonset-tcp-values.yaml | 14 + .../ci/deamonset-default-values.yaml | 10 + .../ci/deamonset-metrics-values.yaml | 12 + .../ci/deamonset-psp-values.yaml | 13 + .../ci/deamonset-webhook-and-psp-values.yaml | 13 + .../ci/deamonset-webhook-values.yaml | 10 + ...eployment-autoscaling-behavior-values.yaml | 14 + .../ci/deployment-autoscaling-values.yaml | 11 + .../ci/deployment-customconfig-values.yaml | 12 + .../ci/deployment-customnodeport-values.yaml | 20 + .../ci/deployment-default-values.yaml | 8 + .../ci/deployment-extra-modules.yaml | 10 + .../ci/deployment-headers-values.yaml | 13 + .../ci/deployment-internal-lb-values.yaml | 13 + .../ci/deployment-metrics-values.yaml | 11 + .../ci/deployment-nodeport-values.yaml | 9 + .../ci/deployment-podannotations-values.yaml | 16 + .../ci/deployment-psp-values.yaml | 10 + ...ent-tcp-udp-configMapNamespace-values.yaml | 19 + ...loyment-tcp-udp-portNamePrefix-values.yaml | 17 + .../ci/deployment-tcp-udp-values.yaml | 15 + .../ci/deployment-tcp-values.yaml | 11 + .../ci/deployment-webhook-and-psp-values.yaml | 12 + .../deployment-webhook-extraEnvs-values.yaml | 12 + .../deployment-webhook-resources-values.yaml | 23 + .../ci/deployment-webhook-values.yaml | 9 + .../files/ingress-nginx/override-values.yaml | 10 + .../files/ingress-nginx/temp.yaml | 724 ++++++++ .../files/ingress-nginx/temp2.yaml | 725 ++++++++
.../files/ingress-nginx/templates/NOTES.txt | 80 + .../ingress-nginx/templates/_helpers.tpl | 185 ++ .../files/ingress-nginx/templates/_params.tpl | 62 + .../job-patch/clusterrole.yaml | 34 + .../job-patch/clusterrolebinding.yaml | 23 + .../job-patch/job-createSecret.yaml | 79 + .../job-patch/job-patchWebhook.yaml | 81 + .../admission-webhooks/job-patch/psp.yaml | 39 + .../admission-webhooks/job-patch/role.yaml | 24 + .../job-patch/rolebinding.yaml | 24 + .../job-patch/serviceaccount.yaml | 16 + .../validating-webhook.yaml | 48 + .../ingress-nginx/templates/clusterrole.yaml | 94 + .../templates/clusterrolebinding.yaml | 19 + .../controller-configmap-addheaders.yaml | 14 + .../controller-configmap-proxyheaders.yaml | 19 + .../templates/controller-configmap-tcp.yaml | 17 + .../templates/controller-configmap-udp.yaml | 17 + .../templates/controller-configmap.yaml | 29 + .../templates/controller-daemonset.yaml | 223 +++ .../templates/controller-deployment.yaml | 228 +++ .../templates/controller-hpa.yaml | 52 + .../templates/controller-ingressclass.yaml | 21 + .../templates/controller-keda.yaml | 42 + .../controller-poddisruptionbudget.yaml | 19 + .../templates/controller-prometheusrules.yaml | 21 + .../templates/controller-psp.yaml | 94 + .../templates/controller-role.yaml | 113 ++ .../templates/controller-rolebinding.yaml | 21 + .../controller-service-internal.yaml | 79 + .../templates/controller-service-metrics.yaml | 45 + .../templates/controller-service-webhook.yaml | 40 + .../templates/controller-service.yaml | 101 ++ .../templates/controller-serviceaccount.yaml | 18 + .../templates/controller-servicemonitor.yaml | 48 + .../controller-wehbooks-networkpolicy.yaml | 19 + .../templates/default-backend-deployment.yaml | 118 ++ .../templates/default-backend-hpa.yaml | 33 + .../default-backend-poddisruptionbudget.yaml | 21 + .../templates/default-backend-psp.yaml | 38 + .../templates/default-backend-role.yaml | 22 + .../default-backend-rolebinding.yaml | 21 + .../templates/default-backend-service.yaml | 41 + .../default-backend-serviceaccount.yaml | 14 + .../templates/dh-param-secret.yaml | 10 + .../files/ingress-nginx/values.yaml | 944 ++++++++++ .../helm_install/files/kafka/.helmignore | 22 + .../roles/helm_install/files/kafka/Chart.yaml | 5 + .../roles/helm_install/files/kafka/README.txt | 3 + .../files/kafka/charts/akhq/Chart.yaml | 19 + .../files/kafka/charts/akhq/LICENSE | 201 +++ .../files/kafka/charts/akhq/README.md | 124 ++ .../kafka/charts/akhq/templates/NOTES.txt | 21 + .../kafka/charts/akhq/templates/_helpers.tpl | 56 + .../charts/akhq/templates/configmap.yaml | 14 + .../charts/akhq/templates/deployment.yaml | 129 ++ .../kafka/charts/akhq/templates/ingress.yaml | 53 + .../kafka/charts/akhq/templates/secret.yaml | 19 + .../kafka/charts/akhq/templates/service.yaml | 31 + .../charts/akhq/templates/serviceaccount.yaml | 15 + .../files/kafka/charts/akhq/values.yaml | 145 ++ .../files/kafka/charts/kafka-ui/.helmignore | 25 + .../files/kafka/charts/kafka-ui/Chart.yaml | 7 + .../files/kafka/charts/kafka-ui/index.yaml | 3 + .../kafka/charts/kafka-ui/templates/NOTES.txt | 21 + .../charts/kafka-ui/templates/_helpers.tpl | 79 + .../charts/kafka-ui/templates/configmap.yaml | 10 + .../templates/configmap_fromValues.yaml | 11 + .../charts/kafka-ui/templates/deployment.yaml | 139 ++ .../kafka/charts/kafka-ui/templates/hpa.yaml | 28 + .../charts/kafka-ui/templates/ingress.yaml | 87 + .../templates/networkpolicy-egress.yaml | 18 + .../templates/networkpolicy-ingress.yaml | 18 +
.../charts/kafka-ui/templates/secret.yaml | 9 + .../charts/kafka-ui/templates/service.yaml | 22 + .../kafka-ui/templates/serviceaccount.yaml | 12 + .../files/kafka/charts/kafka-ui/values.yaml | 151 ++ .../files/kafka/charts/kafka/.helmignore | 22 + .../kafka/charts/kafka/1.broker-config.yaml | 171 ++ .../files/kafka/charts/kafka/Chart.yaml | 5 + .../kafka/charts/kafka/templates/2.dns.yaml | 14 + .../kafka/templates/3.bootstrap-service.yaml | 11 + .../kafka/charts/kafka/templates/5.kafka.yaml | 124 ++ .../charts/kafka/templates/6.outside.yaml | 127 ++ .../files/kafka/charts/kafka/values.yaml | 73 + .../files/kafka/charts/zookeeper/.helmignore | 22 + .../files/kafka/charts/zookeeper/Chart.yaml | 5 + .../charts/zookeeper/templates/0.config.yaml | 35 + .../templates/1.service-leader-election.yaml | 16 + .../zookeeper/templates/2.service-client.yaml | 12 + .../zookeeper/templates/4.statefulset.yaml | 97 ++ .../files/kafka/charts/zookeeper/values.yaml | 68 + .../roles/helm_install/files/kafka/index.yaml | 3 + .../files/kafka/templates/role.yaml | 16 + ansible/roles/helm_install/files/kafka/test | 637 +++++++ .../helm_install/files/kafka/values.yaml | 199 +++ .../helm_install/files/keycloak/Chart.lock | 9 + .../helm_install/files/keycloak/Chart.yaml | 30 + .../helm_install/files/keycloak/README.md | 443 +++++ .../files/keycloak/charts/common/.helmignore | 22 + .../files/keycloak/charts/common/Chart.yaml | 23 + .../files/keycloak/charts/common/README.md | 347 ++++ .../charts/common/templates/_affinities.tpl | 102 ++ .../charts/common/templates/_capabilities.tpl | 139 ++ .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 68 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 63 + .../charts/common/templates/_secrets.tpl | 140 ++ .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 ++ .../common/templates/validations/_mongodb.tpl | 108 ++ .../templates/validations/_postgresql.tpl | 129 ++ .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../files/keycloak/charts/common/values.yaml | 5 + .../keycloak/charts/postgresql/.helmignore | 21 + .../keycloak/charts/postgresql/Chart.lock | 6 + .../keycloak/charts/postgresql/Chart.yaml | 30 + .../keycloak/charts/postgresql/README.md | 662 +++++++ .../postgresql/charts/common/.helmignore | 22 + .../postgresql/charts/common/Chart.yaml | 23 + .../charts/postgresql/charts/common/README.md | 347 ++++ .../charts/common/templates/_affinities.tpl | 102 ++ .../charts/common/templates/_capabilities.tpl | 139 ++ .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 68 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 63 + .../charts/common/templates/_secrets.tpl | 140 ++ .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 ++ .../common/templates/validations/_mongodb.tpl | 108 ++ 
.../templates/validations/_postgresql.tpl | 129 ++ .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../postgresql/charts/common/values.yaml | 5 + .../charts/postgresql/ci/extended-config.yaml | 4 + .../charts/postgresql/ci/init-scripts.yaml | 8 + .../charts/postgresql/ci/metrics.yaml | 24 + .../keycloak/charts/postgresql/ci/rbac.yaml | 16 + .../charts/postgresql/ci/replication.yaml | 5 + .../keycloak/charts/postgresql/ci/tls.yaml | 6 + .../charts/postgresql/templates/NOTES.txt | 89 + .../charts/postgresql/templates/_helpers.tpl | 320 ++++ .../postgresql/templates/extra-list.yaml | 4 + .../templates/networkpolicy-egress.yaml | 32 + .../templates/primary/configmap.yaml | 24 + .../templates/primary/extended-configmap.yaml | 18 + .../primary/initialization-configmap.yaml | 15 + .../templates/primary/metrics-configmap.yaml | 16 + .../templates/primary/metrics-svc.yaml | 31 + .../templates/primary/networkpolicy.yaml | 57 + .../templates/primary/prometheusrule.yaml | 22 + .../templates/primary/servicemonitor.yaml | 48 + .../templates/primary/statefulset.yaml | 639 +++++++ .../templates/primary/svc-headless.yaml | 31 + .../postgresql/templates/primary/svc.yaml | 43 + .../charts/postgresql/templates/psp.yaml | 41 + .../templates/read/networkpolicy.yaml | 36 + .../templates/read/statefulset.yaml | 433 +++++ .../templates/read/svc-headless.yaml | 33 + .../charts/postgresql/templates/read/svc.yaml | 45 + .../charts/postgresql/templates/role.yaml | 31 + .../postgresql/templates/rolebinding.yaml | 22 + .../charts/postgresql/templates/secrets.yaml | 29 + .../postgresql/templates/serviceaccount.yaml | 19 + .../postgresql/templates/tls-secrets.yaml | 27 + .../charts/postgresql/values.schema.json | 156 ++ .../keycloak/charts/postgresql/values.yaml | 1329 ++++++++++++++ .../files/keycloak/ci/ct-values.yaml | 2 + .../files/keycloak/ci/values-ha.yaml | 8 + .../files/keycloak/ci/values-hpa-pdb.yaml | 4 + .../keycloak/ci/values-init-scripts.yaml | 4 + .../ci/values-metrics-and-ingress.yaml | 9 + .../files/keycloak/override-values.yaml | 60 + .../files/keycloak/templates/NOTES.txt | 76 + .../files/keycloak/templates/_helpers.tpl | 273 +++ .../templates/configmap-env-vars.yaml | 37 + .../files/keycloak/templates/configmap.yaml | 18 + .../files/keycloak/templates/extra-list.yaml | 4 + .../keycloak/templates/headless-service.yaml | 30 + .../files/keycloak/templates/hpa.yaml | 35 + .../files/keycloak/templates/ingress.yaml | 58 + .../templates/init-scripts-configmap.yaml | 17 + .../keycloak-config-cli-configmap.yaml | 21 + .../templates/keycloak-config-cli-job.yaml | 120 ++ .../keycloak/templates/metrics-service.yaml | 30 + .../keycloak/templates/networkpolicy.yaml | 39 + .../files/keycloak/templates/pdb.yaml | 25 + .../files/keycloak/templates/role.yaml | 26 + .../files/keycloak/templates/rolebinding.yaml | 23 + .../files/keycloak/templates/secrets.yaml | 31 + .../files/keycloak/templates/service.yaml | 59 + .../keycloak/templates/serviceaccount.yaml | 22 + .../keycloak/templates/servicemonitor.yaml | 48 + .../files/keycloak/templates/statefulset.yaml | 361 ++++ .../files/keycloak/templates/tls-secret.yaml | 76 + .../helm_install/files/keycloak/values.yaml | 951 ++++++++++ ansible/roles/helm_install/files/kubeconfig | 20 + .../helm_install/files/mongo-dsk/.helmignore | 21 + .../helm_install/files/mongo-dsk/Chart.lock | 6 + .../helm_install/files/mongo-dsk/Chart.yaml | 30 + .../helm_install/files/mongo-dsk/README.md | 548 ++++++ 
.../files/mongo-dsk/charts/common/.helmignore | 22 + .../files/mongo-dsk/charts/common/Chart.yaml | 23 + .../files/mongo-dsk/charts/common/README.md | 345 ++++ .../charts/common/templates/_affinities.tpl | 102 ++ .../charts/common/templates/_capabilities.tpl | 128 ++ .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 68 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 52 + .../charts/common/templates/_secrets.tpl | 140 ++ .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 ++ .../common/templates/validations/_mongodb.tpl | 108 ++ .../templates/validations/_postgresql.tpl | 129 ++ .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../files/mongo-dsk/charts/common/values.yaml | 5 + .../files/mongo-dsk/override-values.yaml | 49 + .../files/mongo-dsk/templates/NOTES.txt | 74 + .../files/mongo-dsk/templates/_helpers.tpl | 266 +++ .../config-server-configmap.yaml | 11 + .../config-server-poddisruptionbudget.yaml | 18 + .../config-server-podmonitor.yaml | 32 + .../config-server-statefulset.yaml | 376 ++++ .../files/mongo-dsk/templates/extra-list.yaml | 4 + .../mongo-dsk/templates/headless-service.yaml | 16 + .../templates/mongos/mongos-configmap.yaml | 11 + .../templates/mongos/mongos-dep-sts.yaml | 319 ++++ .../mongos/mongos-poddisruptionbudget.yaml | 18 + .../templates/mongos/mongos-podmonitor.yaml | 32 + .../mongos/mongos-service-per-replica.yaml | 48 + .../templates/mongos/mongos-service.yaml | 41 + .../replicaset-entrypoint-configmap.yaml | 30 + .../files/mongo-dsk/templates/secrets.yaml | 30 + .../mongo-dsk/templates/serviceaccount.yaml | 5 + .../shard/shard-arbiter-configmap.yaml | 11 + .../shard/shard-arbiter-statefulset.yaml | 337 ++++ .../templates/shard/shard-data-configmap.yaml | 11 + .../shard/shard-data-poddisruptionbudget.yaml | 23 + .../shard/shard-data-podmonitor.yaml | 35 + .../shard/shard-data-statefulset.yaml | 387 +++++ .../helm_install/files/mongo-dsk/values.yaml | 1217 +++++++++++++ .../files/mongo-manifest/.helmignore | 21 + .../files/mongo-manifest/Chart.lock | 6 + .../files/mongo-manifest/Chart.yaml | 30 + .../files/mongo-manifest/README.md | 548 ++++++ .../mongo-manifest/charts/common/.helmignore | 22 + .../mongo-manifest/charts/common/Chart.yaml | 23 + .../mongo-manifest/charts/common/README.md | 345 ++++ .../charts/common/templates/_affinities.tpl | 102 ++ .../charts/common/templates/_capabilities.tpl | 128 ++ .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 68 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 52 + .../charts/common/templates/_secrets.tpl | 140 ++ .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 ++ .../common/templates/validations/_mongodb.tpl | 108 ++ .../templates/validations/_postgresql.tpl | 129 ++ .../common/templates/validations/_redis.tpl | 76 + 
.../templates/validations/_validations.tpl | 46 + .../mongo-manifest/charts/common/values.yaml | 5 + .../files/mongo-manifest/override-values.yaml | 52 + .../files/mongo-manifest/templates/NOTES.txt | 74 + .../mongo-manifest/templates/_helpers.tpl | 266 +++ .../config-server-configmap.yaml | 11 + .../config-server-poddisruptionbudget.yaml | 18 + .../config-server-podmonitor.yaml | 32 + .../config-server-statefulset.yaml | 376 ++++ .../mongo-manifest/templates/extra-list.yaml | 4 + .../templates/headless-service.yaml | 16 + .../templates/mongos/mongos-configmap.yaml | 11 + .../templates/mongos/mongos-dep-sts.yaml | 319 ++++ .../mongos/mongos-poddisruptionbudget.yaml | 18 + .../templates/mongos/mongos-podmonitor.yaml | 32 + .../mongos/mongos-service-per-replica.yaml | 48 + .../templates/mongos/mongos-service.yaml | 41 + .../replicaset-entrypoint-configmap.yaml | 30 + .../mongo-manifest/templates/secrets.yaml | 30 + .../templates/serviceaccount.yaml | 5 + .../shard/shard-arbiter-configmap.yaml | 11 + .../shard/shard-arbiter-statefulset.yaml | 337 ++++ .../templates/shard/shard-data-configmap.yaml | 11 + .../shard/shard-data-poddisruptionbudget.yaml | 23 + .../shard/shard-data-podmonitor.yaml | 35 + .../shard/shard-data-statefulset.yaml | 387 +++++ .../files/mongo-manifest/values.yaml | 1217 +++++++++++++ .../helm_install/files/postgresql/.helmignore | 21 + .../helm_install/files/postgresql/Chart.lock | 6 + .../helm_install/files/postgresql/Chart.yaml | 30 + .../helm_install/files/postgresql/README.md | 662 +++++++ .../files/postgresql/charts/common-1.17.1.tgz | Bin 0 -> 14611 bytes .../postgresql/charts/common/.helmignore | 22 + .../files/postgresql/charts/common/Chart.yaml | 23 + .../files/postgresql/charts/common/README.md | 347 ++++ .../charts/common/templates/_affinities.tpl | 102 ++ .../charts/common/templates/_capabilities.tpl | 139 ++ .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 68 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 63 + .../charts/common/templates/_secrets.tpl | 140 ++ .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 ++ .../common/templates/validations/_mongodb.tpl | 108 ++ .../templates/validations/_postgresql.tpl | 129 ++ .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../postgresql/charts/common/values.yaml | 5 + .../files/postgresql/ci/extended-config.yaml | 4 + .../files/postgresql/ci/init-scripts.yaml | 8 + .../files/postgresql/ci/metrics.yaml | 24 + .../files/postgresql/ci/rbac.yaml | 16 + .../files/postgresql/ci/replication.yaml | 5 + .../helm_install/files/postgresql/ci/tls.yaml | 6 + .../files/postgresql/override-values.yaml | 29 + .../files/postgresql/templates/NOTES.txt | 89 + .../files/postgresql/templates/_helpers.tpl | 320 ++++ .../postgresql/templates/extra-list.yaml | 4 + .../templates/networkpolicy-egress.yaml | 32 + .../templates/primary/configmap.yaml | 24 + .../templates/primary/extended-configmap.yaml | 18 + .../primary/initialization-configmap.yaml | 15 + .../templates/primary/metrics-configmap.yaml | 16 + .../templates/primary/metrics-svc.yaml | 31 + .../templates/primary/networkpolicy.yaml | 57 + 
.../templates/primary/prometheusrule.yaml | 22 + .../templates/primary/servicemonitor.yaml | 48 + .../templates/primary/statefulset.yaml | 639 +++++++ .../templates/primary/svc-headless.yaml | 31 + .../postgresql/templates/primary/svc.yaml | 43 + .../files/postgresql/templates/psp.yaml | 41 + .../templates/read/networkpolicy.yaml | 36 + .../templates/read/statefulset.yaml | 433 +++++ .../templates/read/svc-headless.yaml | 33 + .../files/postgresql/templates/read/svc.yaml | 45 + .../files/postgresql/templates/role.yaml | 31 + .../postgresql/templates/rolebinding.yaml | 22 + .../files/postgresql/templates/secrets.yaml | 29 + .../postgresql/templates/serviceaccount.yaml | 19 + .../postgresql/templates/tls-secrets.yaml | 27 + .../files/postgresql/values.schema.json | 156 ++ .../helm_install/files/postgresql/values.yaml | 1335 ++++++++++++++ .../helm_install/files/rabbitmq/.helmignore | 21 + .../helm_install/files/rabbitmq/Chart.lock | 6 + .../helm_install/files/rabbitmq/Chart.yaml | 26 + .../helm_install/files/rabbitmq/README.md | 604 +++++++ .../files/rabbitmq/charts/common-1.17.1.tgz | Bin 0 -> 14611 bytes .../files/rabbitmq/charts/common/.helmignore | 22 + .../files/rabbitmq/charts/common/Chart.yaml | 23 + .../files/rabbitmq/charts/common/README.md | 347 ++++ .../charts/common/templates/_affinities.tpl | 102 ++ .../charts/common/templates/_capabilities.tpl | 139 ++ .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 68 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 63 + .../charts/common/templates/_secrets.tpl | 140 ++ .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 ++ .../common/templates/validations/_mongodb.tpl | 108 ++ .../templates/validations/_postgresql.tpl | 129 ++ .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../files/rabbitmq/charts/common/values.yaml | 5 + .../files/rabbitmq/ci/default-values.yaml | 1 + .../files/rabbitmq/ci/tolerations-values.yaml | 4 + .../files/rabbitmq/override-values.yaml | 31 + .../files/rabbitmq/templates/NOTES.txt | 172 ++ .../files/rabbitmq/templates/_helpers.tpl | 247 +++ .../rabbitmq/templates/configuration.yaml | 16 + .../files/rabbitmq/templates/extra-list.yaml | 4 + .../files/rabbitmq/templates/ingress.yaml | 60 + .../rabbitmq/templates/networkpolicy.yaml | 37 + .../files/rabbitmq/templates/pdb.yaml | 20 + .../rabbitmq/templates/prometheusrule.yaml | 24 + .../files/rabbitmq/templates/role.yaml | 18 + .../files/rabbitmq/templates/rolebinding.yaml | 18 + .../files/rabbitmq/templates/secrets.yaml | 46 + .../rabbitmq/templates/serviceaccount.yaml | 15 + .../rabbitmq/templates/servicemonitor.yaml | 54 + .../files/rabbitmq/templates/statefulset.yaml | 375 ++++ .../rabbitmq/templates/svc-headless.yaml | 41 + .../files/rabbitmq/templates/svc.yaml | 99 ++ .../files/rabbitmq/templates/tls-secrets.yaml | 74 + .../files/rabbitmq/values.schema.json | 100 ++ .../helm_install/files/rabbitmq/values.yaml | 1215 +++++++++++++ .../helm_install/files/redis/.helmignore | 21 + .../roles/helm_install/files/redis/Chart.lock | 6 + .../roles/helm_install/files/redis/Chart.yaml | 28 + .../roles/helm_install/files/redis/README.md | 866 ++++++++++ 
.../files/redis/charts/common-1.17.1.tgz | Bin 0 -> 14611 bytes .../files/redis/charts/common/.helmignore | 22 + .../files/redis/charts/common/Chart.yaml | 23 + .../files/redis/charts/common/README.md | 345 ++++ .../charts/common/templates/_affinities.tpl | 102 ++ .../charts/common/templates/_capabilities.tpl | 128 ++ .../redis/charts/common/templates/_errors.tpl | 23 + .../redis/charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 68 + .../redis/charts/common/templates/_labels.tpl | 18 + .../redis/charts/common/templates/_names.tpl | 52 + .../charts/common/templates/_secrets.tpl | 140 ++ .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../redis/charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 ++ .../common/templates/validations/_mongodb.tpl | 108 ++ .../templates/validations/_postgresql.tpl | 129 ++ .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../files/redis/charts/common/values.yaml | 5 + .../files/redis/ci/extra-flags-values.yaml | 12 + .../files/redis/ci/sentinel-values.yaml | 6 + .../files/redis/ci/standalone-values.yaml | 1 + .../redis/img/redis-cluster-topology.png | Bin 0 -> 11448 bytes .../files/redis/img/redis-topology.png | Bin 0 -> 9709 bytes .../files/redis/override-values.yaml | 21 + .../files/redis/templates/NOTES.txt | 191 ++ .../files/redis/templates/_helpers.tpl | 291 ++++ .../files/redis/templates/configmap.yaml | 60 + .../files/redis/templates/extra-list.yaml | 4 + .../files/redis/templates/headless-svc.yaml | 30 + .../redis/templates/health-configmap.yaml | 192 +++ .../redis/templates/master/application.yaml | 467 +++++ .../files/redis/templates/master/psp.yaml | 46 + .../files/redis/templates/master/pvc.yaml | 26 + .../files/redis/templates/master/service.yaml | 49 + .../files/redis/templates/metrics-svc.yaml | 38 + .../files/redis/templates/networkpolicy.yaml | 78 + .../files/redis/templates/pdb.yaml | 23 + .../files/redis/templates/prometheusrule.yaml | 27 + .../files/redis/templates/replicas/hpa.yaml | 35 + .../redis/templates/replicas/service.yaml | 49 + .../redis/templates/replicas/statefulset.yaml | 455 +++++ .../files/redis/templates/role.yaml | 28 + .../files/redis/templates/rolebinding.yaml | 21 + .../redis/templates/scripts-configmap.yaml | 625 +++++++ .../files/redis/templates/secret.yaml | 17 + .../files/redis/templates/sentinel/hpa.yaml | 35 + .../templates/sentinel/node-services.yaml | 71 + .../templates/sentinel/ports-configmap.yaml | 100 ++ .../redis/templates/sentinel/service.yaml | 96 ++ .../redis/templates/sentinel/statefulset.yaml | 670 +++++++ .../files/redis/templates/serviceaccount.yaml | 21 + .../files/redis/templates/servicemonitor.yaml | 45 + .../files/redis/templates/tls-secret.yaml | 26 + .../files/redis/values.schema.json | 156 ++ .../helm_install/files/redis/values.yaml | 1536 +++++++++++++++++ .../roles/helm_install/files/vault/.gitignore | 1 + .../helm_install/files/vault/.helmignore | 28 + .../vault/00.old/override-values.yaml_221117 | 58 + .../vault/00.old/override-values.yaml_bak | 14 + .../helm_install/files/vault/CHANGELOG.md | 433 +++++ .../helm_install/files/vault/CONTRIBUTING.md | 247 +++ .../roles/helm_install/files/vault/Chart.yaml | 21 + .../roles/helm_install/files/vault/LICENSE | 355 ++++ .../roles/helm_install/files/vault/Makefile | 101 ++ 
.../roles/helm_install/files/vault/README.MD | 127 ++ .../files/vault/override-values.yaml | 82 + .../files/vault/override-values.yaml_bak | 82 + .../files/vault/templates/NOTES.txt | 14 + .../files/vault/templates/_helpers.tpl | 953 ++++++++++ .../vault/templates/csi-clusterrole.yaml | 18 + .../templates/csi-clusterrolebinding.yaml | 19 + .../files/vault/templates/csi-daemonset.yaml | 100 ++ .../vault/templates/csi-serviceaccount.yaml | 16 + .../templates/injector-certs-secret.yaml | 14 + .../vault/templates/injector-clusterrole.yaml | 19 + .../injector-clusterrolebinding.yaml | 19 + .../vault/templates/injector-deployment.yaml | 156 ++ .../templates/injector-disruptionbudget.yaml | 20 + .../templates/injector-mutating-webhook.yaml | 39 + .../templates/injector-network-policy.yaml | 24 + .../vault/templates/injector-psp-role.yaml | 20 + .../templates/injector-psp-rolebinding.yaml | 21 + .../files/vault/templates/injector-psp.yaml | 46 + .../files/vault/templates/injector-role.yaml | 29 + .../vault/templates/injector-rolebinding.yaml | 22 + .../vault/templates/injector-service.yaml | 22 + .../templates/injector-serviceaccount.yaml | 13 + .../templates/prometheus-prometheusrules.yaml | 26 + .../templates/prometheus-servicemonitor.yaml | 44 + .../templates/server-clusterrolebinding.yaml | 24 + .../templates/server-config-configmap.yaml | 40 + .../templates/server-discovery-role.yaml | 21 + .../server-discovery-rolebinding.yaml | 29 + .../templates/server-disruptionbudget.yaml | 26 + .../templates/server-ha-active-service.yaml | 46 + .../templates/server-ha-standby-service.yaml | 45 + .../templates/server-headless-service.yaml | 34 + .../files/vault/templates/server-ingress.yaml | 77 + .../templates/server-network-policy.yaml | 26 + .../vault/templates/server-psp-role.yaml | 20 + .../templates/server-psp-rolebinding.yaml | 21 + .../files/vault/templates/server-psp.yaml | 49 + .../files/vault/templates/server-route.yaml | 34 + .../files/vault/templates/server-service.yaml | 44 + .../templates/server-serviceaccount.yaml | 14 + .../vault/templates/server-statefulset.yaml | 210 +++ .../vault/templates/tests/server-test.yaml | 51 + .../files/vault/templates/ui-service.yaml | 37 + ansible/roles/helm_install/files/vault/test | 600 +++++++ .../helm_install/files/vault/tls/ca-cert.pem | 32 + .../helm_install/files/vault/tls/ca-cert.srl | 1 + .../helm_install/files/vault/tls/ca-key.pem | 52 + .../files/vault/tls/client-cert.pem | 33 + .../files/vault/tls/client-key.pem | 52 + .../files/vault/tls/client-req.pem | 27 + .../helm_install/files/vault/tls/ext.conf | 1 + .../helm_install/files/vault/tls/generator.sh | 94 + .../files/vault/tls/server-cert.pem | 33 + .../files/vault/tls/server-key.pem | 52 + .../files/vault/tls/server-req.pem | 27 + .../files/vault/values.openshift.yaml | 18 + .../files/vault/values.schema.json | 1030 +++++++++++ .../helm_install/files/vault/values.yaml | 1121 ++++++++++++ .../files/vault_agent/configmap.yaml | 52 + .../files/vault_agent/deployment.yaml | 42 + .../helm_install/files/vault_agent/pvc.yaml | 11 + .../files/vault_agent/test/configmap.yaml | 52 + .../files/vault_agent/test/deployment.yaml | 42 + .../files/vault_agent/test/pvc.yaml | 11 + .../helm_install/tasks/helm-chart-install.yml | 37 + .../tasks/helm-chart-install.yml_bak | 23 + .../helm_install/tasks/helm-chart-nginx.yml | 23 + .../roles/helm_install/tasks/helm-install.yml | 60 + ansible/roles/helm_install/tasks/main.yml | 6 + ansible/roles/kubernetes_install/README.md | 38 + 
.../kubernetes_install/defaults/main.yml | 131 ++ .../files/ingress-nginx/.helmignore | 22 + .../files/ingress-nginx/CHANGELOG.md | 445 +++++ .../files/ingress-nginx/Chart.yaml | 23 + .../files/ingress-nginx/OWNERS | 10 + .../files/ingress-nginx/README.md | 494 ++++++ .../files/ingress-nginx/README.md.gotmpl | 235 +++ .../controller-custom-ingressclass-flags.yaml | 7 + .../ci/daemonset-customconfig-values.yaml | 14 + .../ci/daemonset-customnodeport-values.yaml | 22 + .../ci/daemonset-extra-modules.yaml | 10 + .../ci/daemonset-headers-values.yaml | 14 + .../ci/daemonset-internal-lb-values.yaml | 14 + .../ci/daemonset-nodeport-values.yaml | 10 + .../ci/daemonset-podannotations-values.yaml | 17 + ...set-tcp-udp-configMapNamespace-values.yaml | 20 + ...emonset-tcp-udp-portNamePrefix-values.yaml | 18 + .../ci/daemonset-tcp-udp-values.yaml | 16 + .../ci/daemonset-tcp-values.yaml | 14 + .../ci/deamonset-default-values.yaml | 10 + .../ci/deamonset-metrics-values.yaml | 12 + .../ci/deamonset-psp-values.yaml | 13 + .../ci/deamonset-webhook-and-psp-values.yaml | 13 + .../ci/deamonset-webhook-values.yaml | 10 + ...eployment-autoscaling-behavior-values.yaml | 14 + .../ci/deployment-autoscaling-values.yaml | 11 + .../ci/deployment-customconfig-values.yaml | 12 + .../ci/deployment-customnodeport-values.yaml | 20 + .../ci/deployment-default-values.yaml | 8 + .../ci/deployment-extra-modules.yaml | 10 + .../ci/deployment-headers-values.yaml | 13 + .../ci/deployment-internal-lb-values.yaml | 13 + .../ci/deployment-metrics-values.yaml | 11 + .../ci/deployment-nodeport-values.yaml | 9 + .../ci/deployment-podannotations-values.yaml | 16 + .../ci/deployment-psp-values.yaml | 10 + ...ent-tcp-udp-configMapNamespace-values.yaml | 19 + ...loyment-tcp-udp-portNamePrefix-values.yaml | 17 + .../ci/deployment-tcp-udp-values.yaml | 15 + .../ci/deployment-tcp-values.yaml | 11 + .../ci/deployment-webhook-and-psp-values.yaml | 12 + .../deployment-webhook-extraEnvs-values.yaml | 12 + .../deployment-webhook-resources-values.yaml | 23 + .../ci/deployment-webhook-values.yaml | 9 + .../files/ingress-nginx/override-values.yaml | 10 + .../files/ingress-nginx/temp.yaml | 724 ++++++++ .../files/ingress-nginx/temp2.yaml | 725 ++++++++ .../files/ingress-nginx/templates/NOTES.txt | 80 + .../ingress-nginx/templates/_helpers.tpl | 185 ++ .../files/ingress-nginx/templates/_params.tpl | 62 + .../job-patch/clusterrole.yaml | 34 + .../job-patch/clusterrolebinding.yaml | 23 + .../job-patch/job-createSecret.yaml | 79 + .../job-patch/job-patchWebhook.yaml | 81 + .../admission-webhooks/job-patch/psp.yaml | 39 + .../admission-webhooks/job-patch/role.yaml | 24 + .../job-patch/rolebinding.yaml | 24 + .../job-patch/serviceaccount.yaml | 16 + .../validating-webhook.yaml | 48 + .../ingress-nginx/templates/clusterrole.yaml | 94 + .../templates/clusterrolebinding.yaml | 19 + .../controller-configmap-addheaders.yaml | 14 + .../controller-configmap-proxyheaders.yaml | 19 + .../templates/controller-configmap-tcp.yaml | 17 + .../templates/controller-configmap-udp.yaml | 17 + .../templates/controller-configmap.yaml | 29 + .../templates/controller-daemonset.yaml | 223 +++ .../templates/controller-deployment.yaml | 228 +++ .../templates/controller-hpa.yaml | 52 + .../templates/controller-ingressclass.yaml | 21 + .../templates/controller-keda.yaml | 42 + .../controller-poddisruptionbudget.yaml | 19 + .../templates/controller-prometheusrules.yaml | 21 + .../templates/controller-psp.yaml | 94 + .../templates/controller-role.yaml | 113 ++ 
.../templates/controller-rolebinding.yaml | 21 + .../controller-service-internal.yaml | 79 + .../templates/controller-service-metrics.yaml | 45 + .../templates/controller-service-webhook.yaml | 40 + .../templates/controller-service.yaml | 101 ++ .../templates/controller-serviceaccount.yaml | 18 + .../templates/controller-servicemonitor.yaml | 48 + .../controller-wehbooks-networkpolicy.yaml | 19 + .../templates/default-backend-deployment.yaml | 118 ++ .../templates/default-backend-hpa.yaml | 33 + .../default-backend-poddisruptionbudget.yaml | 21 + .../templates/default-backend-psp.yaml | 38 + .../templates/default-backend-role.yaml | 22 + .../default-backend-rolebinding.yaml | 21 + .../templates/default-backend-service.yaml | 41 + .../default-backend-serviceaccount.yaml | 14 + .../templates/dh-param-secret.yaml | 10 + .../files/ingress-nginx/values.yaml | 944 ++++++++++ .../roles/kubernetes_install/files/kubeconfig | 20 + .../kubernetes_install/handlers/main.yml | 10 + .../roles/kubernetes_install/meta/main.yml | 52 + .../tasks/helm-chart-nginx.yml | 13 + .../kubernetes_install/tasks/helm-install.yml | 60 + .../tasks/k8s-helm-chart.yml | 7 + .../kubernetes_install/tasks/k8s-main.yml | 68 + .../kubernetes_install/tasks/k8s-master.yml | 34 + .../kubernetes_install/tasks/k8s-node.yml | 6 + .../roles/kubernetes_install/tasks/main.yml | 10 + .../kubernetes_install/tasks/os-main.yml | 70 + .../kubernetes_install/tasks/os-runtime.yml | 45 + .../templates/config.toml.j2 | 5 + .../kubernetes_install/templates/hosts.j2 | 6 + .../templates/yaml2toml_macro.j2 | 58 + .../roles/kubernetes_install/tests/inventory | 17 + .../roles/kubernetes_install/tests/test.yml | 6 + .../roles/kubernetes_install/vars/main.yml | 2 +
890 files changed, 79234 insertions(+)
create mode 100644 ansible/roles/helm_install/defaults/main.yml
create mode 100644 ansible/roles/helm_install/files/druid/Chart.lock
create mode 100644 ansible/roles/helm_install/files/druid/Chart.yaml
create mode 100644 ansible/roles/helm_install/files/druid/README.md
create mode 100644 ansible/roles/helm_install/files/druid/charts-archive/mysql-1.6.4.tgz
create mode 100644 ansible/roles/helm_install/files/druid/charts-archive/postgresql-8.6.4.tgz
create mode 100644 ansible/roles/helm_install/files/druid/charts-archive/zookeeper-2.1.4.tgz
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/.helmignore
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/Chart.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/README.md
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/templates/NOTES.txt
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/templates/_helpers.tpl
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/templates/configurationFiles-configmap.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/templates/deployment.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/templates/initializationFiles-configmap.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/templates/pvc.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/templates/secrets.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/templates/serviceaccount.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/templates/servicemonitor.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/templates/svc.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/templates/tests/test-configmap.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/templates/tests/test.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/mysql/values.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/.helmignore
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/Chart.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/README.md
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/ci/default-values.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/ci/shmvolume-disabled-values.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/files/README.md
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/files/conf.d/README.md
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/files/docker-entrypoint-initdb.d/README.md
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/NOTES.txt
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/_helpers.tpl
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/configmap.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/extended-config-configmap.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/initialization-configmap.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/metrics-configmap.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/metrics-svc.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/networkpolicy.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/prometheusrule.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/secrets.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/serviceaccount.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/servicemonitor.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/statefulset-slaves.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/statefulset.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/svc-headless.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/svc-read.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/templates/svc.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/values-production.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/values.schema.json
create mode 100755 ansible/roles/helm_install/files/druid/charts/postgresql/values.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/.helmignore
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/Chart.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/OWNERS
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/README.md
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/templates/NOTES.txt
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/templates/_helpers.tpl
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/templates/config-jmx-exporter.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/templates/config-script.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/templates/job-chroots.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/templates/poddisruptionbudget.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/templates/service-headless.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/templates/service.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/templates/servicemonitors.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/templates/statefulset.yaml
create mode 100755 ansible/roles/helm_install/files/druid/charts/zookeeper/values.yaml
create mode 100644 ansible/roles/helm_install/files/druid/install.txt
create mode 100644 ansible/roles/helm_install/files/druid/override-values.yaml
create mode 100644 ansible/roles/helm_install/files/druid/override-values.yaml_221206
create mode 100644 ansible/roles/helm_install/files/druid/override-values.yaml_221207
create mode 100644 ansible/roles/helm_install/files/druid/override-values.yaml_old
create mode 100644 ansible/roles/helm_install/files/druid/templates/NOTES.txt
create mode 100644 ansible/roles/helm_install/files/druid/templates/_helpers.tpl
create mode 100644 ansible/roles/helm_install/files/druid/templates/broker/deployment.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/broker/ingress.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/broker/service.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/configmap.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/coordinator/deployment.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/coordinator/ingress.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/coordinator/service.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/historical/ingress.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/historical/pdb.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/historical/service.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/historical/statefulset.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/middleManager/hpa.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/middleManager/ingress.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/middleManager/pdb.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/middleManager/service.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/middleManager/statefulset.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/overlord/deployment.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/overlord/ingress.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/overlord/service.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/router/deployment.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/router/ingress.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/router/service.yaml
create mode 100644 ansible/roles/helm_install/files/druid/templates/secrets.yaml
create mode 100644 ansible/roles/helm_install/files/druid/values.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/.helmignore
create mode 100644 ansible/roles/helm_install/files/elasticsearch/Chart.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/Makefile
create mode 100644 ansible/roles/helm_install/files/elasticsearch/README.md
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/config/Makefile
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/config/README.md
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/config/test/goss.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/config/values.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/config/watcher_encryption_key
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/default/Makefile
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/default/README.md
create mode 100755 ansible/roles/helm_install/files/elasticsearch/examples/default/rolling_upgrade.sh
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/default/test/goss.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/docker-for-mac/Makefile
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/docker-for-mac/README.md
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/docker-for-mac/values.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/Makefile
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/README.md
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/values-local-path.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/values.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/microk8s/Makefile
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/microk8s/README.md
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/microk8s/values.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/migration/Makefile
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/migration/README.md
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/migration/client.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/migration/data.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/migration/master.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/minikube/Makefile
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/minikube/README.md
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/minikube/values.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/multi/Makefile
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/multi/README.md
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/multi/client.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/multi/data.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/multi/master.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/multi/test/goss.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/networkpolicy/Makefile
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/networkpolicy/values.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/openshift/Makefile
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/openshift/README.md
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/openshift/test/goss.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/openshift/values.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/security/Makefile
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/security/README.md
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/security/test/goss.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/security/values.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/upgrade/Makefile
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/upgrade/README.md
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/upgrade/test/goss.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/examples/upgrade/values.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/override-values.yaml
create mode 100755 ansible/roles/helm_install/files/elasticsearch/schema/es-ddl.sh
create mode 100755 ansible/roles/helm_install/files/elasticsearch/templates/NOTES.txt
create mode 100644 ansible/roles/helm_install/files/elasticsearch/templates/_helpers.tpl
create mode 100644 ansible/roles/helm_install/files/elasticsearch/templates/configmap.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/templates/ingress.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/templates/networkpolicy.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/templates/poddisruptionbudget.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/templates/podsecuritypolicy.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/templates/role.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/templates/rolebinding.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/templates/secret-cert.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/templates/secret.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/templates/service.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/templates/serviceaccount.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/templates/statefulset.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/templates/test/test-elasticsearch-health.yaml
create mode 100644 ansible/roles/helm_install/files/elasticsearch/tests/elasticsearch_test.py
create mode 100644 ansible/roles/helm_install/files/elasticsearch/values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/.helmignore
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/CHANGELOG.md
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/Chart.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/OWNERS
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/README.md
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/README.md.gotmpl
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-customconfig-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-extra-modules.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-headers-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-nodeport-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-podannotations-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-default-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-metrics-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-psp-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-webhook-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-autoscaling-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-customconfig-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-customnodeport-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-default-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-extra-modules.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-headers-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-internal-lb-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-metrics-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-nodeport-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-podannotations-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-psp-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/override-values.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/temp.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/temp2.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/NOTES.txt
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/_helpers.tpl
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/_params.tpl
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/clusterrole.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/clusterrolebinding.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-addheaders.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-tcp.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-udp.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-daemonset.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-deployment.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-hpa.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-ingressclass.yaml
create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-keda.yaml
create mode 100644
ansible/roles/helm_install/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-prometheusrules.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-psp.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-role.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-rolebinding.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-service-internal.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-service-metrics.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-service-webhook.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-service.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-serviceaccount.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-servicemonitor.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-deployment.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-hpa.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-psp.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-role.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-rolebinding.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-service.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-serviceaccount.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/templates/dh-param-secret.yaml create mode 100644 ansible/roles/helm_install/files/ingress-nginx/values.yaml create mode 100644 ansible/roles/helm_install/files/kafka/.helmignore create mode 100644 ansible/roles/helm_install/files/kafka/Chart.yaml create mode 100644 ansible/roles/helm_install/files/kafka/README.txt create mode 100644 ansible/roles/helm_install/files/kafka/charts/akhq/Chart.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/akhq/LICENSE create mode 100644 ansible/roles/helm_install/files/kafka/charts/akhq/README.md create mode 100644 ansible/roles/helm_install/files/kafka/charts/akhq/templates/NOTES.txt create mode 100644 ansible/roles/helm_install/files/kafka/charts/akhq/templates/_helpers.tpl create mode 100644 ansible/roles/helm_install/files/kafka/charts/akhq/templates/configmap.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/akhq/templates/deployment.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/akhq/templates/ingress.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/akhq/templates/secret.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/akhq/templates/service.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/akhq/templates/serviceaccount.yaml create mode 100644 
ansible/roles/helm_install/files/kafka/charts/akhq/values.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/.helmignore create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/Chart.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/index.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/NOTES.txt create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/_helpers.tpl create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/configmap.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/configmap_fromValues.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/deployment.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/hpa.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/ingress.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/networkpolicy-egress.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/networkpolicy-ingress.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/secret.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/service.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/serviceaccount.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka-ui/values.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka/.helmignore create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka/1.broker-config.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka/Chart.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka/templates/2.dns.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka/templates/3.bootstrap-service.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka/templates/5.kafka.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka/templates/6.outside.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/kafka/values.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/zookeeper/.helmignore create mode 100644 ansible/roles/helm_install/files/kafka/charts/zookeeper/Chart.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/0.config.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/1.service-leader-election.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/2.service-client.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/4.statefulset.yaml create mode 100644 ansible/roles/helm_install/files/kafka/charts/zookeeper/values.yaml create mode 100644 ansible/roles/helm_install/files/kafka/index.yaml create mode 100644 ansible/roles/helm_install/files/kafka/templates/role.yaml create mode 100644 ansible/roles/helm_install/files/kafka/test create mode 100644 ansible/roles/helm_install/files/kafka/values.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/Chart.lock create mode 100644 ansible/roles/helm_install/files/keycloak/Chart.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/README.md create mode 100644 
ansible/roles/helm_install/files/keycloak/charts/common/.helmignore create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/Chart.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/README.md create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/_affinities.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/_capabilities.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/_errors.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/_images.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/_ingress.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/_labels.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/_names.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/_secrets.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/_storage.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/_utils.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/_warnings.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_cassandra.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_mongodb.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_redis.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/common/values.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/.helmignore create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/Chart.lock create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/Chart.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/README.md create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/.helmignore create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/Chart.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/README.md create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_affinities.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_errors.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_images.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_ingress.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_labels.tpl create 
mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_names.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_secrets.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_storage.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_utils.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_warnings.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_cassandra.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_mongodb.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_redis.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/values.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/extended-config.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/init-scripts.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/metrics.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/rbac.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/replication.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/tls.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/NOTES.txt create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/_helpers.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/extra-list.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/networkpolicy-egress.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/configmap.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/extended-configmap.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/initialization-configmap.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/metrics-configmap.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/metrics-svc.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/networkpolicy.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/prometheusrule.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/servicemonitor.yaml create mode 100644 
ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/statefulset.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/svc-headless.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/svc.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/psp.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/networkpolicy.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/statefulset.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/svc-headless.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/svc.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/role.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/rolebinding.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/secrets.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/serviceaccount.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/tls-secrets.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/values.schema.json create mode 100644 ansible/roles/helm_install/files/keycloak/charts/postgresql/values.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/ci/ct-values.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/ci/values-ha.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/ci/values-hpa-pdb.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/ci/values-init-scripts.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/ci/values-metrics-and-ingress.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/override-values.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/NOTES.txt create mode 100644 ansible/roles/helm_install/files/keycloak/templates/_helpers.tpl create mode 100644 ansible/roles/helm_install/files/keycloak/templates/configmap-env-vars.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/configmap.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/extra-list.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/headless-service.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/hpa.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/ingress.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/init-scripts-configmap.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/keycloak-config-cli-configmap.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/keycloak-config-cli-job.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/metrics-service.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/networkpolicy.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/pdb.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/role.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/rolebinding.yaml create mode 100644 
ansible/roles/helm_install/files/keycloak/templates/secrets.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/service.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/serviceaccount.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/servicemonitor.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/statefulset.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/templates/tls-secret.yaml create mode 100644 ansible/roles/helm_install/files/keycloak/values.yaml create mode 100644 ansible/roles/helm_install/files/kubeconfig create mode 100644 ansible/roles/helm_install/files/mongo-dsk/.helmignore create mode 100644 ansible/roles/helm_install/files/mongo-dsk/Chart.lock create mode 100644 ansible/roles/helm_install/files/mongo-dsk/Chart.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/README.md create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/.helmignore create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/Chart.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/README.md create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_affinities.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_capabilities.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_errors.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_images.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_ingress.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_labels.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_names.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_secrets.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_storage.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_utils.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_warnings.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_cassandra.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_mongodb.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_redis.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/charts/common/values.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/override-values.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/NOTES.txt create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/_helpers.tpl create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-configmap.yaml create mode 100644 
ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-poddisruptionbudget.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-podmonitor.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-statefulset.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/extra-list.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/headless-service.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-configmap.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-dep-sts.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-poddisruptionbudget.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-podmonitor.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-service-per-replica.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-service.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/replicaset-entrypoint-configmap.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/secrets.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/serviceaccount.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-arbiter-configmap.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-arbiter-statefulset.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-configmap.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-poddisruptionbudget.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-podmonitor.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-statefulset.yaml create mode 100644 ansible/roles/helm_install/files/mongo-dsk/values.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/.helmignore create mode 100644 ansible/roles/helm_install/files/mongo-manifest/Chart.lock create mode 100644 ansible/roles/helm_install/files/mongo-manifest/Chart.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/README.md create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/.helmignore create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/Chart.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/README.md create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_affinities.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_capabilities.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_errors.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_images.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_ingress.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_labels.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_names.tpl create mode 100644 
ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_secrets.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_storage.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_utils.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_warnings.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_cassandra.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_mongodb.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_redis.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/charts/common/values.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/override-values.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/NOTES.txt create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/_helpers.tpl create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-configmap.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-poddisruptionbudget.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-podmonitor.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-statefulset.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/extra-list.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/headless-service.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-configmap.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-dep-sts.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-poddisruptionbudget.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-podmonitor.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-service-per-replica.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-service.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/replicaset-entrypoint-configmap.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/secrets.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/serviceaccount.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-arbiter-configmap.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-arbiter-statefulset.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-configmap.yaml create mode 
100644 ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-poddisruptionbudget.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-podmonitor.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-statefulset.yaml create mode 100644 ansible/roles/helm_install/files/mongo-manifest/values.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/.helmignore create mode 100644 ansible/roles/helm_install/files/postgresql/Chart.lock create mode 100644 ansible/roles/helm_install/files/postgresql/Chart.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/README.md create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common-1.17.1.tgz create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/.helmignore create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/Chart.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/README.md create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/_affinities.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/_capabilities.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/_errors.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/_images.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/_ingress.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/_labels.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/_names.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/_secrets.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/_storage.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/_utils.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/_warnings.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_cassandra.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_mongodb.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_redis.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/charts/common/values.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/ci/extended-config.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/ci/init-scripts.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/ci/metrics.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/ci/rbac.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/ci/replication.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/ci/tls.yaml create mode 100644 
ansible/roles/helm_install/files/postgresql/override-values.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/NOTES.txt create mode 100644 ansible/roles/helm_install/files/postgresql/templates/_helpers.tpl create mode 100644 ansible/roles/helm_install/files/postgresql/templates/extra-list.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/networkpolicy-egress.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/primary/configmap.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/primary/extended-configmap.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/primary/initialization-configmap.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/primary/metrics-configmap.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/primary/metrics-svc.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/primary/networkpolicy.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/primary/prometheusrule.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/primary/servicemonitor.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/primary/statefulset.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/primary/svc-headless.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/primary/svc.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/psp.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/read/networkpolicy.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/read/statefulset.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/read/svc-headless.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/read/svc.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/role.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/rolebinding.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/secrets.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/serviceaccount.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/templates/tls-secrets.yaml create mode 100644 ansible/roles/helm_install/files/postgresql/values.schema.json create mode 100644 ansible/roles/helm_install/files/postgresql/values.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/.helmignore create mode 100644 ansible/roles/helm_install/files/rabbitmq/Chart.lock create mode 100644 ansible/roles/helm_install/files/rabbitmq/Chart.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/README.md create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common-1.17.1.tgz create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/.helmignore create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/Chart.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/README.md create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_affinities.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_capabilities.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_errors.tpl create mode 100644 
ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_images.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_ingress.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_labels.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_names.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_secrets.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_storage.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_utils.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_warnings.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_cassandra.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_mongodb.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_redis.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/charts/common/values.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/ci/default-values.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/ci/tolerations-values.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/override-values.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/NOTES.txt create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/_helpers.tpl create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/configuration.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/extra-list.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/ingress.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/networkpolicy.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/pdb.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/prometheusrule.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/role.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/rolebinding.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/secrets.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/serviceaccount.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/servicemonitor.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/statefulset.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/svc-headless.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/svc.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/templates/tls-secrets.yaml create mode 100644 ansible/roles/helm_install/files/rabbitmq/values.schema.json create mode 100644 ansible/roles/helm_install/files/rabbitmq/values.yaml create mode 100644 
ansible/roles/helm_install/files/redis/.helmignore create mode 100644 ansible/roles/helm_install/files/redis/Chart.lock create mode 100644 ansible/roles/helm_install/files/redis/Chart.yaml create mode 100644 ansible/roles/helm_install/files/redis/README.md create mode 100644 ansible/roles/helm_install/files/redis/charts/common-1.17.1.tgz create mode 100644 ansible/roles/helm_install/files/redis/charts/common/.helmignore create mode 100644 ansible/roles/helm_install/files/redis/charts/common/Chart.yaml create mode 100644 ansible/roles/helm_install/files/redis/charts/common/README.md create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/_affinities.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/_capabilities.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/_errors.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/_images.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/_ingress.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/_labels.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/_names.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/_secrets.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/_storage.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/_utils.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/_warnings.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/validations/_cassandra.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/validations/_mongodb.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/validations/_redis.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/roles/helm_install/files/redis/charts/common/values.yaml create mode 100644 ansible/roles/helm_install/files/redis/ci/extra-flags-values.yaml create mode 100644 ansible/roles/helm_install/files/redis/ci/sentinel-values.yaml create mode 100644 ansible/roles/helm_install/files/redis/ci/standalone-values.yaml create mode 100644 ansible/roles/helm_install/files/redis/img/redis-cluster-topology.png create mode 100644 ansible/roles/helm_install/files/redis/img/redis-topology.png create mode 100644 ansible/roles/helm_install/files/redis/override-values.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/NOTES.txt create mode 100644 ansible/roles/helm_install/files/redis/templates/_helpers.tpl create mode 100644 ansible/roles/helm_install/files/redis/templates/configmap.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/extra-list.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/headless-svc.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/health-configmap.yaml create mode 100644 
ansible/roles/helm_install/files/redis/templates/master/application.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/master/psp.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/master/pvc.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/master/service.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/metrics-svc.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/networkpolicy.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/pdb.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/prometheusrule.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/replicas/hpa.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/replicas/service.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/replicas/statefulset.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/role.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/rolebinding.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/scripts-configmap.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/secret.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/sentinel/hpa.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/sentinel/node-services.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/sentinel/ports-configmap.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/sentinel/service.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/sentinel/statefulset.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/serviceaccount.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/servicemonitor.yaml create mode 100644 ansible/roles/helm_install/files/redis/templates/tls-secret.yaml create mode 100644 ansible/roles/helm_install/files/redis/values.schema.json create mode 100644 ansible/roles/helm_install/files/redis/values.yaml create mode 100644 ansible/roles/helm_install/files/vault/.gitignore create mode 100644 ansible/roles/helm_install/files/vault/.helmignore create mode 100644 ansible/roles/helm_install/files/vault/00.old/override-values.yaml_221117 create mode 100644 ansible/roles/helm_install/files/vault/00.old/override-values.yaml_bak create mode 100644 ansible/roles/helm_install/files/vault/CHANGELOG.md create mode 100644 ansible/roles/helm_install/files/vault/CONTRIBUTING.md create mode 100644 ansible/roles/helm_install/files/vault/Chart.yaml create mode 100644 ansible/roles/helm_install/files/vault/LICENSE create mode 100644 ansible/roles/helm_install/files/vault/Makefile create mode 100644 ansible/roles/helm_install/files/vault/README.MD create mode 100644 ansible/roles/helm_install/files/vault/override-values.yaml create mode 100644 ansible/roles/helm_install/files/vault/override-values.yaml_bak create mode 100644 ansible/roles/helm_install/files/vault/templates/NOTES.txt create mode 100644 ansible/roles/helm_install/files/vault/templates/_helpers.tpl create mode 100644 ansible/roles/helm_install/files/vault/templates/csi-clusterrole.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/csi-clusterrolebinding.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/csi-daemonset.yaml create mode 100644 
ansible/roles/helm_install/files/vault/templates/csi-serviceaccount.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/injector-certs-secret.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/injector-clusterrole.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/injector-clusterrolebinding.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/injector-deployment.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/injector-disruptionbudget.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/injector-mutating-webhook.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/injector-network-policy.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/injector-psp-role.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/injector-psp-rolebinding.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/injector-psp.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/injector-role.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/injector-rolebinding.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/injector-service.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/injector-serviceaccount.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/prometheus-prometheusrules.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/prometheus-servicemonitor.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-clusterrolebinding.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-config-configmap.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-discovery-role.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-discovery-rolebinding.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-disruptionbudget.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-ha-active-service.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-ha-standby-service.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-headless-service.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-ingress.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-network-policy.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-psp-role.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-psp-rolebinding.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-psp.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-route.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-service.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-serviceaccount.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/server-statefulset.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/tests/server-test.yaml create mode 100644 ansible/roles/helm_install/files/vault/templates/ui-service.yaml create mode 100644 ansible/roles/helm_install/files/vault/test create mode 100644 ansible/roles/helm_install/files/vault/tls/ca-cert.pem create mode 100644 
ansible/roles/helm_install/files/vault/tls/ca-cert.srl create mode 100644 ansible/roles/helm_install/files/vault/tls/ca-key.pem create mode 100644 ansible/roles/helm_install/files/vault/tls/client-cert.pem create mode 100644 ansible/roles/helm_install/files/vault/tls/client-key.pem create mode 100644 ansible/roles/helm_install/files/vault/tls/client-req.pem create mode 100644 ansible/roles/helm_install/files/vault/tls/ext.conf create mode 100755 ansible/roles/helm_install/files/vault/tls/generator.sh create mode 100644 ansible/roles/helm_install/files/vault/tls/server-cert.pem create mode 100644 ansible/roles/helm_install/files/vault/tls/server-key.pem create mode 100644 ansible/roles/helm_install/files/vault/tls/server-req.pem create mode 100644 ansible/roles/helm_install/files/vault/values.openshift.yaml create mode 100644 ansible/roles/helm_install/files/vault/values.schema.json create mode 100644 ansible/roles/helm_install/files/vault/values.yaml create mode 100644 ansible/roles/helm_install/files/vault_agent/configmap.yaml create mode 100644 ansible/roles/helm_install/files/vault_agent/deployment.yaml create mode 100644 ansible/roles/helm_install/files/vault_agent/pvc.yaml create mode 100644 ansible/roles/helm_install/files/vault_agent/test/configmap.yaml create mode 100644 ansible/roles/helm_install/files/vault_agent/test/deployment.yaml create mode 100644 ansible/roles/helm_install/files/vault_agent/test/pvc.yaml create mode 100644 ansible/roles/helm_install/tasks/helm-chart-install.yml create mode 100644 ansible/roles/helm_install/tasks/helm-chart-install.yml_bak create mode 100644 ansible/roles/helm_install/tasks/helm-chart-nginx.yml create mode 100644 ansible/roles/helm_install/tasks/helm-install.yml create mode 100644 ansible/roles/helm_install/tasks/main.yml create mode 100644 ansible/roles/kubernetes_install/README.md create mode 100644 ansible/roles/kubernetes_install/defaults/main.yml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/.helmignore create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/CHANGELOG.md create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/Chart.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/OWNERS create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/README.md create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/README.md.gotmpl create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customconfig-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-extra-modules.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-headers-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-nodeport-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-podannotations-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml 
create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-default-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-metrics-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-psp-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customconfig-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customnodeport-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-default-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-extra-modules.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-headers-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-internal-lb-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-metrics-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-nodeport-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-podannotations-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-psp-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/override-values.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/temp.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/temp2.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/NOTES.txt create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/_helpers.tpl create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/_params.tpl create mode 100644 
ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/clusterrole.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/clusterrolebinding.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-addheaders.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-tcp.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-udp.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-daemonset.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-deployment.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-hpa.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-ingressclass.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-keda.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-prometheusrules.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-psp.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-role.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-rolebinding.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-internal.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-metrics.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-webhook.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-serviceaccount.yaml 
create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-servicemonitor.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-deployment.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-hpa.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-psp.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-role.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-rolebinding.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-service.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-serviceaccount.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/templates/dh-param-secret.yaml create mode 100644 ansible/roles/kubernetes_install/files/ingress-nginx/values.yaml create mode 100644 ansible/roles/kubernetes_install/files/kubeconfig create mode 100644 ansible/roles/kubernetes_install/handlers/main.yml create mode 100644 ansible/roles/kubernetes_install/meta/main.yml create mode 100644 ansible/roles/kubernetes_install/tasks/helm-chart-nginx.yml create mode 100644 ansible/roles/kubernetes_install/tasks/helm-install.yml create mode 100644 ansible/roles/kubernetes_install/tasks/k8s-helm-chart.yml create mode 100644 ansible/roles/kubernetes_install/tasks/k8s-main.yml create mode 100644 ansible/roles/kubernetes_install/tasks/k8s-master.yml create mode 100644 ansible/roles/kubernetes_install/tasks/k8s-node.yml create mode 100644 ansible/roles/kubernetes_install/tasks/main.yml create mode 100644 ansible/roles/kubernetes_install/tasks/os-main.yml create mode 100644 ansible/roles/kubernetes_install/tasks/os-runtime.yml create mode 100644 ansible/roles/kubernetes_install/templates/config.toml.j2 create mode 100644 ansible/roles/kubernetes_install/templates/hosts.j2 create mode 100644 ansible/roles/kubernetes_install/templates/yaml2toml_macro.j2 create mode 100644 ansible/roles/kubernetes_install/tests/inventory create mode 100644 ansible/roles/kubernetes_install/tests/test.yml create mode 100644 ansible/roles/kubernetes_install/vars/main.yml diff --git a/ansible/roles/helm_install/defaults/main.yml b/ansible/roles/helm_install/defaults/main.yml new file mode 100644 index 0000000..2bfebf8 --- /dev/null +++ b/ansible/roles/helm_install/defaults/main.yml @@ -0,0 +1,5 @@ +helm_checksum: sha256:3156e4fe5f034e5b127cf165d61a8a1c48eb7a73b14689b273de5e6117df6fe2 +helm_version: v3.2.3 + +kubernetes_version: 1.25.2 +kubernetes_middleware_namespace: dsk-middle diff --git a/ansible/roles/helm_install/files/druid/Chart.lock b/ansible/roles/helm_install/files/druid/Chart.lock new file mode 100644 index 0000000..c3bb822 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/Chart.lock @@ -0,0 +1,12 @@ +dependencies: +- name: zookeeper + repository: https://charts.helm.sh/incubator + version: 2.1.4 +- name: mysql + repository: https://charts.helm.sh/stable + version: 1.6.4 +- name: postgresql + repository: https://charts.helm.sh/stable + version: 8.6.4 +digest: 
sha256:fb2ab5eed4b4fc00eee5f23764209d7cb494a07161439ea28d85ed3741eaf7f7
+generated: "2022-07-29T12:12:44.428393074+09:00"
diff --git a/ansible/roles/helm_install/files/druid/Chart.yaml b/ansible/roles/helm_install/files/druid/Chart.yaml
new file mode 100644
index 0000000..c52a420
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/Chart.yaml
@@ -0,0 +1,41 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: 0.23.0
+description: Apache Druid is a high performance real-time analytics database.
+name: druid
+dependencies:
+  - name: zookeeper
+    version: 2.1.4
+    repository: https://charts.helm.sh/incubator
+    condition: zookeeper.enabled
+  - name: mysql
+    version: 1.6.4
+    repository: https://charts.helm.sh/stable
+    condition: mysql.enabled
+  - name: postgresql
+    version: 8.6.4
+    repository: https://charts.helm.sh/stable
+    condition: postgresql.enabled
+version: 0.3.1
+home: https://druid.apache.org/
+icon: https://druid.apache.org/img/favicon.png
+sources:
+  - https://github.com/apache/druid
+keywords:
+  - olap
+  - database
+  - analytics
diff --git a/ansible/roles/helm_install/files/druid/README.md b/ansible/roles/helm_install/files/druid/README.md
new file mode 100644
index 0000000..34428a3
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/README.md
@@ -0,0 +1,212 @@
+
+
+# Apache Druid
+
+[Apache Druid](https://druid.apache.org/) is a high performance real-time analytics database.
+
+## Dependency Update
+
+Before you install the Druid Chart, update the dependencies:
+```bash
+helm dependency update helm/druid
+```
+
+## Install Chart
+
+To install the Druid Chart into your Kubernetes cluster:
+```bash
+helm install druid helm/druid --namespace dev --create-namespace
+```
+
+After the installation succeeds, you can get the status of the Chart:
+```bash
+helm status druid -n dev
+```
+
+If you want to delete your Chart, use this command:
+```bash
+helm uninstall druid -n dev
+```
+
+### Helm ingresses
+
+The Chart provides ingress configuration to allow customizing the installation by adapting
+the `values.yaml` to your setup.
+Please read the comments in the `values.yaml` file for more details on how to configure your reverse
+proxy or load balancer.
+
+### Chart Prefix
+
+This Helm chart automatically prefixes all names using the release name to avoid collisions.
+
+### URL prefix
+
+This chart exposes 6 endpoints:
+
+- Druid Overlord
+- Druid Broker
+- Druid Coordinator
+- Druid Historical
+- Druid Middle Manager
+- Druid Router
+
+### Druid configuration
+
+Druid configuration can be changed by using environment variables from the Docker image.
+See the
+[Druid Docker entry point](https://github.com/apache/druid/blob/master/distribution/docker/druid.sh)
+for more information.
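+
+For example, assuming the entry point's usual convention of mapping an environment
+variable such as `druid_zk_service_host` onto the runtime property
+`druid.zk.service.host`, properties can be set for every component through the
+chart's `configVars` map. A minimal sketch (the values below are illustrative,
+not chart defaults):
+
+```yaml
+configVars:
+  # assumed to become druid.zk.service.host at runtime
+  druid_zk_service_host: druid-zookeeper-headless
+  # assumed to become druid.metadata.storage.type
+  druid_metadata_storage_type: postgresql
+```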
+
+### Middle Manager and Historical StatefulSets
+
+Middle Managers and Historicals use StatefulSets. Persistence is enabled by default.
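+
+A minimal sketch of tuning that persistence from `values.yaml`, using the
+persistence parameters listed in the table below (the size and storage class
+are illustrative placeholders, not chart defaults):
+
+```yaml
+historical:
+  persistence:
+    enabled: true
+    size: 20Gi              # PVC size requested per historical pod
+    storageClass: fast-ssd  # assumed cluster-specific StorageClass
+middleManager:
+  persistence:
+    enabled: true
+    size: 20Gi
+```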
+
+## Helm Chart Configuration
+
+The following table lists the configurable parameters of the Druid chart and their default values.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `image.repository` | container image name | `apache/druid` |
+| `image.tag` | container image tag | `0.19.0` |
+| `image.pullPolicy` | container pull policy | `IfNotPresent` |
+| `image.pullSecrets` | image pull secrets for private repository | `[]` |
+| `configMap.enabled` | enable druid configuration as configmap | `true` |
+| `configVars` | druid configuration variables for all components | `` |
+| `gCloudStorage.enabled` | look for secret to set google cloud credentials | `false` |
+| `gCloudStorage.secretName` | secretName to be mounted as google cloud credentials | `false` |
+| `broker.enabled` | enable broker | `true` |
+| `broker.name` | broker component name | `broker` |
+| `broker.replicaCount` | broker node replicas (deployment) | `1` |
+| `broker.port` | port of broker component | `8082` |
+| `broker.serviceType` | service type for service | `ClusterIP` |
+| `broker.resources` | broker node resources requests & limits | `{}` |
+| `broker.podAnnotations` | broker deployment annotations | `{}` |
+| `broker.nodeSelector` | node labels for broker pod assignment | `{}` |
+| `broker.tolerations` | broker tolerations | `[]` |
+| `broker.config` | broker private config such as `JAVA_OPTS` | |
+| `broker.affinity` | broker affinity policy | `{}` |
+| `broker.ingress.enabled` | enable ingress | `false` |
+| `broker.ingress.hosts` | hosts for the broker api | `[ "chart-example.local" ]` |
+| `broker.ingress.path` | path of the broker api | `/` |
+| `broker.ingress.annotations` | annotations for the broker api ingress | `{}` |
+| `broker.ingress.tls` | TLS configuration for the ingress | `[]` |
+| `coordinator.enabled` | enable coordinator | `true` |
+| `coordinator.name` | coordinator component name | `coordinator` |
+| `coordinator.replicaCount` | coordinator node replicas (deployment) | `1` |
+| `coordinator.port` | port of coordinator component | `8081` |
+| `coordinator.serviceType` | service type for service | `ClusterIP` |
+| `coordinator.resources` | coordinator node resources requests & limits | `{}` |
+| `coordinator.podAnnotations` | coordinator deployment annotations | `{}` |
+| `coordinator.nodeSelector` | node labels for coordinator pod assignment | `{}` |
+| `coordinator.tolerations` | coordinator tolerations | `[]` |
+| `coordinator.config` | coordinator private config such as `JAVA_OPTS` | |
+| `coordinator.affinity` | coordinator affinity policy | `{}` |
+| `coordinator.ingress.enabled` | enable ingress | `false` |
+| `coordinator.ingress.hosts` | hosts for the coordinator api | `[ "chart-example.local" ]` |
+| `coordinator.ingress.path` | path of the coordinator api | `/` |
+| `coordinator.ingress.annotations` | annotations for the coordinator api ingress | `{}` |
+| `coordinator.ingress.tls` | TLS configuration for the ingress | `[]` |
+| `overlord.enabled` | enable overlord | `false` |
+| `overlord.name` | overlord component name | `overlord` |
+| `overlord.replicaCount` | overlord node replicas (deployment) | `1` |
+| `overlord.port` | port of overlord component | `8081` |
+| `overlord.serviceType` | service type for service | `ClusterIP` |
+| `overlord.resources` | overlord node resources requests & limits | `{}` |
+| `overlord.podAnnotations` | overlord deployment annotations | `{}` |
+| `overlord.nodeSelector` | node labels for overlord pod assignment | `{}` |
+| `overlord.tolerations` | overlord tolerations | `[]` |
+| `overlord.config` | overlord private config such as `JAVA_OPTS` | |
+| `overlord.affinity` | overlord affinity policy | `{}` |
+| `overlord.ingress.enabled` | enable ingress | `false` |
+| `overlord.ingress.hosts` | hosts for the overlord api | `[ "chart-example.local" ]` |
+| `overlord.ingress.path` | path of the overlord api | `/` |
+| `overlord.ingress.annotations` | annotations for the overlord api ingress | `{}` |
+| `overlord.ingress.tls` | TLS configuration for the ingress | `[]` |
+| `historical.enabled` | enable historical | `true` |
+| `historical.name` | historical component name | `historical` |
+| `historical.replicaCount` | historical node replicas (statefulset) | `1` |
+| `historical.port` | port of historical component | `8083` |
+| `historical.serviceType` | service type for service | `ClusterIP` |
+| `historical.resources` | historical node resources requests & limits | `{}` |
+| `historical.podAnnotations` | historical deployment annotations | `{}` |
+| `historical.nodeSelector` | node labels for historical pod assignment | `{}` |
+| `historical.securityContext` | custom security context for historical containers | `{ fsGroup: 1000 }` |
+| `historical.tolerations` | historical tolerations | `[]` |
+| `historical.config` | historical node private config such as `JAVA_OPTS` | |
+| `historical.persistence.enabled` | historical persistence enabled/disabled | `true` |
+| `historical.persistence.size` | historical persistent volume size | `4Gi` |
+| `historical.persistence.storageClass` | historical persistent volume class | `nil` |
+| `historical.persistence.accessMode` | historical persistent volume access mode | `ReadWriteOnce` |
+| `historical.antiAffinity` | historical anti-affinity policy | `soft` |
+| `historical.nodeAffinity` | historical node affinity policy | `{}` |
+| `historical.ingress.enabled` | enable ingress | `false` |
+| `historical.ingress.hosts` | hosts for the historical api | `[ "chart-example.local" ]` |
+| `historical.ingress.path` | path of the historical api | `/` |
+| `historical.ingress.annotations` | annotations for the historical api ingress | `{}` |
+| `historical.ingress.tls` | TLS configuration for the ingress | `[]` |
+| `middleManager.enabled` | enable middleManager | `true` |
+| `middleManager.name` | middleManager component name | `middleManager` |
+| `middleManager.replicaCount` | middleManager node replicas (statefulset) | `1` |
+| `middleManager.port` | port of middleManager component | `8091` |
+| `middleManager.serviceType` | service type for service | `ClusterIP` |
+| `middleManager.resources` | middleManager node resources requests & limits | `{}` |
+| `middleManager.podAnnotations` | middleManager deployment annotations | `{}` |
+| `middleManager.nodeSelector` | node labels for middleManager pod assignment | `{}` |
+| `middleManager.securityContext` | custom security context for middleManager containers | `{ fsGroup: 1000 }` |
+| `middleManager.tolerations` | middleManager tolerations | `[]` |
+| `middleManager.config` | middleManager private config such as `JAVA_OPTS` | |
+| `middleManager.persistence.enabled` | middleManager persistence enabled/disabled | `true` |
+| `middleManager.persistence.size` | middleManager persistent volume size | `4Gi` |
+| `middleManager.persistence.storageClass` | middleManager persistent volume class | `nil` |
+| `middleManager.persistence.accessMode` | middleManager persistent volume access mode | `ReadWriteOnce` |
+| `middleManager.antiAffinity` | middleManager anti-affinity policy | `soft` |
+| `middleManager.nodeAffinity` | middleManager node affinity policy | `{}` |
+| `middleManager.autoscaling.enabled` | enable horizontal pod autoscaling | `false` |
+| `middleManager.autoscaling.minReplicas` | middleManager autoscaling min replicas | `2` |
+| `middleManager.autoscaling.maxReplicas` | middleManager autoscaling max replicas | `5` |
+| `middleManager.autoscaling.metrics` | middleManager autoscaling metrics | `{}` |
+| `middleManager.ingress.enabled` | enable ingress | `false` |
+| `middleManager.ingress.hosts` | hosts for the middleManager api | `[ "chart-example.local" ]` |
+| `middleManager.ingress.path` | path of the middleManager api | `/` |
+| `middleManager.ingress.annotations` | annotations for the middleManager api ingress | `{}` |
+| `middleManager.ingress.tls` | TLS configuration for the ingress | `[]` |
+| `router.enabled` | enable router | `false` |
+| `router.name` | router component name | `router` |
+| `router.replicaCount` | router node replicas (deployment) | `1` |
+| `router.port` | port of router component | `8888` |
+| `router.serviceType` | service type for service | `ClusterIP` |
+| `router.resources` | router node resources requests & limits | `{}` |
+| `router.podAnnotations` | router deployment annotations | `{}` |
+| `router.nodeSelector` | node labels for router pod assignment | `{}` |
+| `router.tolerations` | router tolerations | `[]` |
+| `router.config` | router private config such as `JAVA_OPTS` | |
+| `router.affinity` | router affinity policy | `{}` |
+| `router.ingress.enabled` | enable ingress | `false` |
+| `router.ingress.hosts` | hosts for the router api | `[ "chart-example.local" ]` |
+| `router.ingress.path` | path of the router api | `/` |
+| `router.ingress.annotations` | annotations for the router api ingress | `{}` |
+| `router.ingress.tls` | TLS configuration for the ingress | `[]` |
+
+Full and up-to-date documentation can be found in the comments of the `values.yaml` file.
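+
+As a worked example of the parameters above (a sketch only; the host name is a
+placeholder), a small override file can enable the router and expose it through
+an ingress:
+
+```yaml
+router:
+  enabled: true
+  ingress:
+    enabled: true
+    hosts:
+      - druid.example.local
+    path: /
+```
+
+Such a file can then be passed to the install command shown earlier:
+
+```bash
+helm install druid helm/druid --namespace dev --create-namespace -f override-values.yaml
+```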
diff --git a/ansible/roles/helm_install/files/druid/charts-archive/mysql-1.6.4.tgz b/ansible/roles/helm_install/files/druid/charts-archive/mysql-1.6.4.tgz
new file mode 100644
index 0000000000000000000000000000000000000000..89202e2bbaac436b6b16d44986c024714b56c1f1
GIT binary patch
literal 11121
[base85 binary payload of mysql-1.6.4.tgz omitted]
literal 0
HcmV?d00001

diff --git a/ansible/roles/helm_install/files/druid/charts-archive/postgresql-8.6.4.tgz b/ansible/roles/helm_install/files/druid/charts-archive/postgresql-8.6.4.tgz
new file mode 100644
index 0000000000000000000000000000000000000000..314c4285fc8db9b4e326e860773974541953b097
GIT binary patch
literal 32017
[base85 binary payloads of postgresql-8.6.4.tgz and the remaining chart archives omitted]
zq-utzg#ET`3Ez5-(~l(PRIl+yvo%2QYyV(hFQ1wwG-VT84^2|!bC{eLku|lQT82sn zQEs|2KFwf(@0H7Oh1AS*t;zV;40SwnGbh3wTNd)Q3<~S~7;v5qi zYV(*1%-!6pRNYTtDG8m3HN`IBnW+=nwF_9RC(3*VXv9+O`zvPQO^K)9S4uzZJaZT0 z1a`>5<$49Rj?hEOga$*MC{ zSke2=GIRezun*r@qTeGDu?x)eKCXesHdQH$b5iu12%`{lPx(@qD?jTT&-`k~$y^`( zuu3u9I^eo6BD9^~&uv}e_LwmkA^z=R+}<3n@~af$x2Bip& zy3o@}`u6Yl=kD?cat-!@M`hH6d6M=!JxxL}exV_}@3J;_v+H4Xrmx(A?4I!h7UI=! zvn%eu)o-7ff4~0Lao(T1Juu-VX3YR|mDXuU>)JHaijA0BoV)sM4r3_Qc)LVI=BlP* zg7cLE)$c~!c9U3d{y>^6XaV!~2PACXFRr@I3c|fqjq5TR&1&w)$)LactNzP4z$R^Y z+CMoR&bfLW0O+(c@AJf(aDYxhZ+%nL5V|)aY<9}<-AVz+n1*%uhXdcqd=b5Xa1X?$ zYIL91;cu??MumRwe)ae)@OxCdch73~Zm)J%n9;E_#lg9;x*T5+mWpVmEG6d3p}ey~ z>@HMl4q=ppunnE7_05f~t!G=CTX?I}S>Id(iYH7G?Yi1?ZLkgNold9YI6wDKcYYY0 z_`X$$I2>FkZ$tak{%}Tv9pmgNZV9AVMS--s4W$ **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete --purge my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release completely. + +## Configuration + +The following table lists the configurable parameters of the MySQL chart and their default values. + +| Parameter | Description | Default | +| -------------------------------------------- | -------------------------------------------------------------------------------------------- | ---------------------------------------------------- | +| `args` | Additional arguments to pass to the MySQL container. | `[]` | +| `initContainer.resources` | initContainer resource requests/limits | Memory: `10Mi`, CPU: `10m` | +| `image` | `mysql` image repository. | `mysql` | +| `imageTag` | `mysql` image tag. | `5.7.14` | +| `busybox.image` | `busybox` image repository. | `busybox` | +| `busybox.tag` | `busybox` image tag. | `1.29.3` | +| `testFramework.enabled` | `test-framework` switch. | `true` | +| `testFramework.image` | `test-framework` image repository. | `dduportal/bats` | +| `testFramework.tag` | `test-framework` image tag. | `0.4.0` | +| `imagePullPolicy` | Image pull policy | `IfNotPresent` | +| `existingSecret` | Use Existing secret for Password details | `nil` | +| `extraVolumes` | Additional volumes as a string to be passed to the `tpl` function | | +| `extraVolumeMounts` | Additional volumeMounts as a string to be passed to the `tpl` function | | +| `extraInitContainers` | Additional init containers as a string to be passed to the `tpl` function | | +| `mysqlRootPassword` | Password for the `root` user. Ignored if existing secret is provided | Random 10 characters | +| `mysqlUser` | Username of new user to create. | `nil` | +| `mysqlPassword` | Password for the new user. Ignored if existing secret is provided | Random 10 characters | +| `mysqlDatabase` | Name for new database to create. | `nil` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 |
+| `readinessProbe.periodSeconds` | How often to perform the probe | 10 |
+| `readinessProbe.timeoutSeconds` | When the probe times out | 1 |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 |
+| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` |
+| `persistence.enabled` | Create a volume to store data | `true` |
+| `persistence.size` | Size of persistent volume claim | 8Gi RW |
+| `persistence.storageClass` | Type of persistent volume claim | `nil` |
+| `persistence.accessMode` | ReadWriteOnce or ReadOnly | ReadWriteOnce |
+| `persistence.existingClaim` | Name of existing persistent volume | `nil` |
+| `persistence.subPath` | Subdirectory of the volume to mount | `nil` |
+| `persistence.annotations` | Persistent Volume annotations | `{}` |
+| `nodeSelector` | Node labels for pod assignment | `{}` |
+| `affinity` | Affinity rules for pod assignment | `{}` |
+| `tolerations` | Pod taint tolerations for deployment | `[]` |
+| `metrics.enabled` | Start a side-car prometheus exporter | `false` |
+| `metrics.image` | Exporter image | `prom/mysqld-exporter` |
+| `metrics.imageTag` | Exporter image tag | `v0.10.0` |
+| `metrics.imagePullPolicy` | Exporter image pull policy | `IfNotPresent` |
+| `metrics.resources` | Exporter resource requests/limits | `nil` |
+| `metrics.livenessProbe.initialDelaySeconds` | Delay before metrics liveness probe is initiated | 15 |
+| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 |
+| `metrics.readinessProbe.initialDelaySeconds` | Delay before metrics readiness probe is initiated | 5 |
+| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 1 |
+| `metrics.flags` | Additional flags for the mysql exporter to use | `[]` |
+| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` |
+| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
+| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` |
+| `configurationFiles` | List of mysql configuration files | `nil` |
+| `configurationFilesPath` | Path of mysql configuration files | `/etc/mysql/conf.d/` |
+| `securityContext.enabled` | Enable security context (mysql pod) | `false` |
+| `securityContext.fsGroup` | Group ID for the container (mysql pod) | 999 |
+| `securityContext.runAsUser` | User ID for the container (mysql pod) | 999 |
+| `service.annotations` | Kubernetes annotations for mysql | `{}` |
+| `service.type` | Kubernetes service type | ClusterIP |
+| `service.loadBalancerIP` | LoadBalancer service IP | `""` |
+| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` |
+| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the mysql.fullname template |
+| `ssl.enabled` | Setup and use SSL for MySQL connections | `false` |
+| `ssl.secret` | Name of the secret containing the SSL certificates | mysql-ssl-certs |
+| `ssl.certificates[0].name` | Name of the secret containing the SSL certificates | `nil` |
+| `ssl.certificates[0].ca` | CA certificate | `nil` |
+| `ssl.certificates[0].cert` | Server certificate (public key) | `nil` |
+| `ssl.certificates[0].key` | Server key (private key) | `nil` |
+| `imagePullSecrets` | Name of Secret resource containing private registry credentials | `nil` |
+| `initializationFiles` | List of SQL files which are run after the container starts | `nil` |
+| `timezone` | Container and mysqld timezone (TZ env) | `nil` (UTC depending on image) |
+| `podAnnotations` | Map of annotations to add to the pods | `{}` |
+| `podLabels` | Map of labels to add to the pods | `{}` |
+| `priorityClassName` | Set pod priorityClassName | `{}` |
+| `deploymentAnnotations` | Map of annotations for deployment | `{}` |
+| `strategy` | Update strategy policy | `{type: "Recreate"}` |
+
+Some of the parameters above map to the env variables defined in the [MySQL DockerHub image](https://hub.docker.com/_/mysql/).
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install --name my-release \
+  --set mysqlRootPassword=secretpassword,mysqlUser=my-user,mysqlPassword=my-password,mysqlDatabase=my-database \
+    stable/mysql
+```
+
+The above command sets the MySQL `root` account password to `secretpassword`. Additionally it creates a standard database user named `my-user`, with the password `my-password`, who has access to a database named `my-database`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install --name my-release -f values.yaml stable/mysql
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Persistence
+
+The [MySQL](https://hub.docker.com/_/mysql/) image stores the MySQL data and configurations at the `/var/lib/mysql` path of the container.
+
+By default a PersistentVolumeClaim is created and mounted into that directory. To disable this behavior, set `persistence.enabled` to `false` in values.yaml; an emptyDir volume will be used instead.
+
+> *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."*
+
+**Notice**: You may need to increase the value of `livenessProbe.initialDelaySeconds` when enabling persistence, because the I/O performance of the backing PersistentVolume affects how long database initialization takes. The default window for database initialization is `60` seconds (`livenessProbe.initialDelaySeconds` + `livenessProbe.periodSeconds` * `livenessProbe.failureThreshold` = 30 + 10 * 3). If initialization takes longer than this limit, the kubelet will restart the database container, interrupting initialization and potentially leaving the persistent data in an unusable state.
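+
+For example, a minimal override values file for a slow volume might look like the following sketch (the storage class name is hypothetical; the keys are the chart parameters documented above):
+
+```yaml
+persistence:
+  enabled: true
+  storageClass: "slow-hdd"   # hypothetical storage class; omit to use the cluster default
+livenessProbe:
+  initialDelaySeconds: 120   # give slow volumes more time before the first liveness check
+```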
+
+## Custom MySQL configuration files
+
+The [MySQL](https://hub.docker.com/_/mysql/) image accepts custom configuration files at the path `/etc/mysql/conf.d`. If you want to use a customized MySQL configuration, you can create your alternative configuration files by passing the file contents on the `configurationFiles` attribute. Note that according to the MySQL documentation only files ending with `.cnf` are loaded.
+
+```yaml
+configurationFiles:
+  mysql.cnf: |-
+    [mysqld]
+    skip-host-cache
+    skip-name-resolve
+    sql-mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
+  mysql_custom.cnf: |-
+    [mysqld]
+```
+
+## MySQL initialization files
+
+The [MySQL](https://hub.docker.com/_/mysql/) image accepts `*.sh`, `*.sql` and `*.sql.gz` files at the path `/docker-entrypoint-initdb.d`.
+These files are run exactly once, during container initialization, and are ignored on subsequent container restarts.
+If you want to use initialization scripts, you can create initialization files by passing the file contents on the `initializationFiles` attribute.
+
+
+```yaml
+initializationFiles:
+  first-db.sql: |-
+    CREATE DATABASE IF NOT EXISTS first DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
+  second-db.sql: |-
+    CREATE DATABASE IF NOT EXISTS second DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
+```
+
+## SSL
+
+This chart supports configuring MySQL to use [encrypted connections](https://dev.mysql.com/doc/refman/5.7/en/encrypted-connections.html) with TLS/SSL certificates provided by the user. This is accomplished by storing the required Certificate Authority file, the server public key certificate, and the server private key as a Kubernetes secret. The SSL options for this chart support the following use cases:
+
+* Manage certificate secrets with helm
+* Manage certificate secrets outside of helm
+
+## Manage certificate secrets with helm
+
+Include your certificate data in the `ssl.certificates` section. For example:
+
+```yaml
+ssl:
+  enabled: false
+  secret: mysql-ssl-certs
+  certificates:
+  - name: mysql-ssl-certs
+    ca: |-
+      -----BEGIN CERTIFICATE-----
+      ...
+      -----END CERTIFICATE-----
+    cert: |-
+      -----BEGIN CERTIFICATE-----
+      ...
+      -----END CERTIFICATE-----
+    key: |-
+      -----BEGIN RSA PRIVATE KEY-----
+      ...
+      -----END RSA PRIVATE KEY-----
+```
+
+> **Note**: Make sure your certificate data has the correct formatting in the values file.
+
+## Manage certificate secrets outside of helm
+
+1. Ensure the certificate secret exists before installation of this chart.
+2. Set the name of the certificate secret in `ssl.secret`.
+3. Make sure there are no entries underneath `ssl.certificates`.
+
+To manually create the certificate secret from local files you can execute:
+
+```bash
+kubectl create secret generic mysql-ssl-certs \
+  --from-file=ca.pem=./ssl/certificate-authority.pem \
+  --from-file=server-cert.pem=./ssl/server-public-key.pem \
+  --from-file=server-key.pem=./ssl/server-private-key.pem
+```
+
+> **Note**: `ca.pem`, `server-cert.pem`, and `server-key.pem` **must** be used as the key names in this generic secret.
+
+If you are using certificates, your `configurationFiles` must include the three SSL lines under `[mysqld]`:
+
+```
+[mysqld]
+  ssl-ca=/ssl/ca.pem
+  ssl-cert=/ssl/server-cert.pem
+  ssl-key=/ssl/server-key.pem
+```
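+
+To verify that connections are actually encrypted, one option (a sketch; the host and password below are placeholders) is to query the session SSL status from a pod with a MySQL 5.7 client:
+
+```bash
+# A non-empty Ssl_cipher value indicates the connection is encrypted.
+mysql -h my-release-mysql -u root -p"${MYSQL_ROOT_PASSWORD}" \
+  --ssl-mode=REQUIRED \
+  -e "SHOW SESSION STATUS LIKE 'Ssl_cipher';"
+```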
diff --git a/ansible/roles/helm_install/files/druid/charts/mysql/templates/NOTES.txt b/ansible/roles/helm_install/files/druid/charts/mysql/templates/NOTES.txt
new file mode 100755
index 0000000..d33753a
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/mysql/templates/NOTES.txt
@@ -0,0 +1,43 @@
+MySQL can be accessed via port 3306 on the following DNS name from within your cluster:
+{{ template "mysql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+
+{{- if .Values.existingSecret }}
+If you have not already created the mysql password secret:
+
+    kubectl create secret generic {{ .Values.existingSecret }} --namespace {{ .Release.Namespace }} --from-file=./mysql-root-password --from-file=./mysql-password
+{{ else }}
+
+To get your root password run:
+
+    MYSQL_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mysql.fullname" . }} -o jsonpath="{.data.mysql-root-password}" | base64 --decode; echo)
+{{- end }}
+
+To connect to your database:
+
+1. Run an Ubuntu pod that you can use as a client:
+
+    kubectl run -i --tty ubuntu --image=ubuntu:16.04 --restart=Never -- bash -il
+
+2. Install the mysql client:
+
+    $ apt-get update && apt-get install mysql-client -y
+
+3. Connect using the mysql cli, then provide your password:
+    $ mysql -h {{ template "mysql.fullname" . }} -p
+
+To connect to your database directly from outside the K8s cluster:
+    {{- if contains "NodePort" .Values.service.type }}
+    MYSQL_HOST=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath='{.items[0].status.addresses[0].address}')
+    MYSQL_PORT=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "mysql.fullname" . }} -o jsonpath='{.spec.ports[0].nodePort}')
+
+    {{- else if contains "ClusterIP" .Values.service.type }}
+    MYSQL_HOST=127.0.0.1
+    MYSQL_PORT={{ .Values.service.port }}
+
+    # Execute the following command to route the connection:
+    kubectl port-forward svc/{{ template "mysql.fullname" . }} {{ .Values.service.port }}
+
+    {{- end }}
+
+    mysql -h ${MYSQL_HOST} -P${MYSQL_PORT} -u root -p${MYSQL_ROOT_PASSWORD}
+
diff --git a/ansible/roles/helm_install/files/druid/charts/mysql/templates/_helpers.tpl b/ansible/roles/helm_install/files/druid/charts/mysql/templates/_helpers.tpl
new file mode 100755
index 0000000..f108425
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/mysql/templates/_helpers.tpl
@@ -0,0 +1,43 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "mysql.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "mysql.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Generate chart secret name
+*/}}
+{{- define "mysql.secretName" -}}
+{{ default (include "mysql.fullname" .) .Values.existingSecret }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "mysql.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+{{ default (include "mysql.fullname" .)
.Values.serviceAccount.name }} +{{- else -}} +{{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/druid/charts/mysql/templates/configurationFiles-configmap.yaml b/ansible/roles/helm_install/files/druid/charts/mysql/templates/configurationFiles-configmap.yaml new file mode 100755 index 0000000..ebed8cc --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/mysql/templates/configurationFiles-configmap.yaml @@ -0,0 +1,12 @@ +{{- if .Values.configurationFiles }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "mysql.fullname" . }}-configuration + namespace: {{ .Release.Namespace }} +data: +{{- range $key, $val := .Values.configurationFiles }} + {{ $key }}: |- +{{ $val | indent 4}} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/druid/charts/mysql/templates/deployment.yaml b/ansible/roles/helm_install/files/druid/charts/mysql/templates/deployment.yaml new file mode 100755 index 0000000..cd1452d --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/mysql/templates/deployment.yaml @@ -0,0 +1,252 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "mysql.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "mysql.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- with .Values.deploymentAnnotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + +spec: + strategy: +{{ toYaml .Values.strategy | indent 4 }} + selector: + matchLabels: + app: {{ template "mysql.fullname" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "mysql.fullname" . }} + release: {{ .Release.Name }} +{{- with .Values.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + serviceAccountName: {{ template "mysql.serviceAccountName" . }} + initContainers: + - name: "remove-lost-found" + image: "{{ .Values.busybox.image}}:{{ .Values.busybox.tag }}" + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + resources: +{{ toYaml .Values.initContainer.resources | indent 10 }} + command: ["rm", "-fr", "/var/lib/mysql/lost+found"] + volumeMounts: + - name: data + mountPath: /var/lib/mysql + {{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.extraInitContainers }} +{{ tpl .Values.extraInitContainers . | indent 6 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + containers: + - name: {{ template "mysql.fullname" . 
}} + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + + {{- with .Values.args }} + args: + {{- range . }} + - {{ . | quote }} + {{- end }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + env: + {{- if .Values.mysqlAllowEmptyPassword }} + - name: MYSQL_ALLOW_EMPTY_PASSWORD + value: "true" + {{- end }} + {{- if not (and .Values.allowEmptyRootPassword (not .Values.mysqlRootPassword)) }} + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-root-password + {{- if .Values.mysqlAllowEmptyPassword }} + optional: true + {{- end }} + {{- end }} + {{- if not (and .Values.allowEmptyRootPassword (not .Values.mysqlPassword)) }} + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-password + {{- if or .Values.mysqlAllowEmptyPassword (empty .Values.mysqlUser) }} + optional: true + {{- end }} + {{- end }} + - name: MYSQL_USER + value: {{ default "" .Values.mysqlUser | quote }} + - name: MYSQL_DATABASE + value: {{ default "" .Values.mysqlDatabase | quote }} + {{- if .Values.timezone }} + - name: TZ + value: {{ .Values.timezone }} + {{- end }} + ports: + - name: mysql + containerPort: 3306 + livenessProbe: + exec: + command: + {{- if .Values.mysqlAllowEmptyPassword }} + - mysqladmin + - ping + {{- else }} + - sh + - -c + - "mysqladmin ping -u root -p${MYSQL_ROOT_PASSWORD}" + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + readinessProbe: + exec: + command: + {{- if .Values.mysqlAllowEmptyPassword }} + - mysqladmin + - ping + {{- else }} + - sh + - -c + - "mysqladmin ping -u root -p${MYSQL_ROOT_PASSWORD}" + {{- end }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + volumeMounts: + - name: data + mountPath: /var/lib/mysql + {{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.configurationFiles }} + {{- range $key, $val := .Values.configurationFiles }} + - name: configurations + mountPath: {{ $.Values.configurationFilesPath }}{{ $key }} + subPath: {{ $key }} + {{- end -}} + {{- end }} + {{- if .Values.initializationFiles }} + - name: migrations + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.ssl.enabled }} + - name: certificates + mountPath: /ssl + {{- end }} + {{- if .Values.extraVolumeMounts }} +{{ tpl .Values.extraVolumeMounts . | indent 8 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: "{{ .Values.metrics.image }}:{{ .Values.metrics.imageTag }}" + imagePullPolicy: {{ .Values.metrics.imagePullPolicy | quote }} + {{- if .Values.mysqlAllowEmptyPassword }} + command: + - 'sh' + - '-c' + - 'DATA_SOURCE_NAME="root@(localhost:3306)/" /bin/mysqld_exporter' + {{- else }} + env: + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . 
}} + key: mysql-root-password + command: + - 'sh' + - '-c' + - 'DATA_SOURCE_NAME="root:$MYSQL_ROOT_PASSWORD@(localhost:3306)/" /bin/mysqld_exporter' + {{- end }} + {{- range $f := .Values.metrics.flags }} + - {{ $f | quote }} + {{- end }} + ports: + - name: metrics + containerPort: 9104 + livenessProbe: + httpGet: + path: / + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + readinessProbe: + httpGet: + path: / + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} + {{- end }} + volumes: + {{- if .Values.configurationFiles }} + - name: configurations + configMap: + name: {{ template "mysql.fullname" . }}-configuration + {{- end }} + {{- if .Values.initializationFiles }} + - name: migrations + configMap: + name: {{ template "mysql.fullname" . }}-initialization + {{- end }} + {{- if .Values.ssl.enabled }} + - name: certificates + secret: + secretName: {{ .Values.ssl.secret }} + {{- end }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "mysql.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end -}} + {{- if .Values.extraVolumes }} +{{ tpl .Values.extraVolumes . | indent 6 }} + {{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/mysql/templates/initializationFiles-configmap.yaml b/ansible/roles/helm_install/files/druid/charts/mysql/templates/initializationFiles-configmap.yaml new file mode 100755 index 0000000..38c3795 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/mysql/templates/initializationFiles-configmap.yaml @@ -0,0 +1,12 @@ +{{- if .Values.initializationFiles }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "mysql.fullname" . }}-initialization + namespace: {{ .Release.Namespace }} +data: +{{- range $key, $val := .Values.initializationFiles }} + {{ $key }}: |- +{{ $val | indent 4}} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/druid/charts/mysql/templates/pvc.yaml b/ansible/roles/helm_install/files/druid/charts/mysql/templates/pvc.yaml new file mode 100755 index 0000000..39e9bf8 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/mysql/templates/pvc.yaml @@ -0,0 +1,29 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "mysql.fullname" . }} + namespace: {{ .Release.Namespace }} +{{- with .Values.persistence.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: + app: {{ template "mysql.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/mysql/templates/secrets.yaml b/ansible/roles/helm_install/files/druid/charts/mysql/templates/secrets.yaml new file mode 100755 index 0000000..d9dfd12 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/mysql/templates/secrets.yaml @@ -0,0 +1,51 @@ +{{- if not .Values.existingSecret }} +{{- if or (not .Values.allowEmptyRootPassword) (or .Values.mysqlRootPassword .Values.mysqlPassword) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "mysql.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "mysql.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + {{ if .Values.mysqlRootPassword }} + mysql-root-password: {{ .Values.mysqlRootPassword | b64enc | quote }} + {{ else }} + {{ if not .Values.allowEmptyRootPassword }} + mysql-root-password: {{ randAlphaNum 10 | b64enc | quote }} + {{ end }} + {{ end }} + {{ if .Values.mysqlPassword }} + mysql-password: {{ .Values.mysqlPassword | b64enc | quote }} + {{ else }} + {{ if not .Values.allowEmptyRootPassword }} + mysql-password: {{ randAlphaNum 10 | b64enc | quote }} + {{ end }} + {{ end }} +{{ end }} +{{- if .Values.ssl.enabled }} +{{ if .Values.ssl.certificates }} +{{- range .Values.ssl.certificates }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: + app: {{ template "mysql.fullname" $ }} + chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}" + release: "{{ $.Release.Name }}" + heritage: "{{ $.Release.Service }}" +type: Opaque +data: + ca.pem: {{ .ca | b64enc }} + server-cert.pem: {{ .cert | b64enc }} + server-key.pem: {{ .key | b64enc }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/mysql/templates/serviceaccount.yaml b/ansible/roles/helm_install/files/druid/charts/mysql/templates/serviceaccount.yaml new file mode 100755 index 0000000..36ce6b3 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/mysql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "mysql.serviceAccountName" . }} + labels: + app: {{ template "mysql.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/mysql/templates/servicemonitor.yaml b/ansible/roles/helm_install/files/druid/charts/mysql/templates/servicemonitor.yaml new file mode 100755 index 0000000..bd830be --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/mysql/templates/servicemonitor.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "mysql.fullname" . 
}}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "mysql.fullname" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+  {{- if .Values.metrics.serviceMonitor.additionalLabels }}
+{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }}
+  {{- end }}
+spec:
+  endpoints:
+  - port: metrics
+    interval: 30s
+  namespaceSelector:
+    matchNames:
+      - {{ .Release.Namespace }}
+  selector:
+    matchLabels:
+      app: {{ include "mysql.fullname" . }}
+      release: {{ .Release.Name }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/druid/charts/mysql/templates/svc.yaml b/ansible/roles/helm_install/files/druid/charts/mysql/templates/svc.yaml
new file mode 100755
index 0000000..b9687f2
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/mysql/templates/svc.yaml
@@ -0,0 +1,36 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "mysql.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "mysql.fullname" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+  annotations:
+{{- if .Values.service.annotations }}
+{{ toYaml .Values.service.annotations | indent 4 }}
+{{- end }}
+{{- if and (.Values.metrics.enabled) (.Values.metrics.annotations) }}
+{{ toYaml .Values.metrics.annotations | indent 4 }}
+{{- end }}
+spec:
+  type: {{ .Values.service.type }}
+  {{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP))) }}
+  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+  {{- end }}
+  ports:
+  - name: mysql
+    port: {{ .Values.service.port }}
+    targetPort: mysql
+    {{- if .Values.service.nodePort }}
+    nodePort: {{ .Values.service.nodePort }}
+    {{- end }}
+  {{- if .Values.metrics.enabled }}
+  - name: metrics
+    port: 9104
+    targetPort: metrics
+  {{- end }}
+  selector:
+    app: {{ template "mysql.fullname" . }}
diff --git a/ansible/roles/helm_install/files/druid/charts/mysql/templates/tests/test-configmap.yaml b/ansible/roles/helm_install/files/druid/charts/mysql/templates/tests/test-configmap.yaml
new file mode 100755
index 0000000..ece5a47
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/mysql/templates/tests/test-configmap.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.testFramework.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "mysql.fullname" . }}-test
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "mysql.fullname" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    heritage: "{{ .Release.Service }}"
+    release: "{{ .Release.Name }}"
+data:
+  run.sh: |-
+    {{- if .Values.ssl.enabled | and .Values.mysqlRootPassword }}
+    @test "Testing SSL MySQL Connection" {
+        mysql --host={{ template "mysql.fullname" . }} --port={{ .Values.service.port | default "3306" }} --ssl-cert=/ssl/server-cert.pem --ssl-key=/ssl/server-key.pem -u root -p{{ .Values.mysqlRootPassword }}
+    }
+    {{- else if .Values.mysqlRootPassword }}
+    @test "Testing MySQL Connection" {
+        mysql --host={{ template "mysql.fullname" . }} --port={{ .Values.service.port | default "3306" }} -u root -p{{ .Values.mysqlRootPassword }}
+    }
+    {{- end }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/druid/charts/mysql/templates/tests/test.yaml b/ansible/roles/helm_install/files/druid/charts/mysql/templates/tests/test.yaml
new file mode 100755
index 0000000..30392a9
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/mysql/templates/tests/test.yaml
@@ -0,0 +1,54 @@
+{{- if .Values.testFramework.enabled }}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: {{ template "mysql.fullname" . }}-test
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "mysql.fullname" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    heritage: "{{ .Release.Service }}"
+    release: "{{ .Release.Name }}"
+  annotations:
+    "helm.sh/hook": test-success
+spec:
+  initContainers:
+  - name: test-framework
+    image: "{{ .Values.testFramework.image }}:{{ .Values.testFramework.tag }}"
+    command:
+    - "bash"
+    - "-c"
+    - |
+      set -ex
+      # copy bats to tools dir
+      cp -R /usr/local/libexec/ /tools/bats/
+    volumeMounts:
+    - mountPath: /tools
+      name: tools
+  containers:
+  - name: {{ .Release.Name }}-test
+    image: "{{ .Values.image }}:{{ .Values.imageTag }}"
+    command: ["/tools/bats/bats", "-t", "/tests/run.sh"]
+    volumeMounts:
+    - mountPath: /tests
+      name: tests
+      readOnly: true
+    - mountPath: /tools
+      name: tools
+    {{- if .Values.ssl.enabled }}
+    - name: certificates
+      mountPath: /ssl
+    {{- end }}
+  volumes:
+  - name: tests
+    configMap:
+      name: {{ template "mysql.fullname" . }}-test
+  - name: tools
+    emptyDir: {}
+  {{- if .Values.ssl.enabled }}
+  - name: certificates
+    secret:
+      secretName: {{ .Values.ssl.secret }}
+  {{- end }}
+  restartPolicy: Never
+{{- end }}
diff --git a/ansible/roles/helm_install/files/druid/charts/mysql/values.yaml b/ansible/roles/helm_install/files/druid/charts/mysql/values.yaml
new file mode 100755
index 0000000..cac823b
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/mysql/values.yaml
@@ -0,0 +1,234 @@
+## mysql image version
+## ref: https://hub.docker.com/r/library/mysql/tags/
+##
+image: "mysql"
+imageTag: "5.7.30"
+
+strategy:
+  type: Recreate
+
+busybox:
+  image: "busybox"
+  tag: "1.31.1"
+
+testFramework:
+  enabled: true
+  image: "dduportal/bats"
+  tag: "0.4.0"
+
+## Specify password for root user
+##
+## Default: random 10 character string
+# mysqlRootPassword: testing
+
+## Create a database user
+##
+# mysqlUser:
+## Default: random 10 character string
+# mysqlPassword:
+
+## Allow unauthenticated access, uncomment to enable
+##
+# mysqlAllowEmptyPassword: true
+
+## Create a database
+##
+# mysqlDatabase:
+
+## Specify an imagePullPolicy (Required)
+## It's recommended to change this to 'Always' if the image tag is 'latest'
+## ref: http://kubernetes.io/docs/user-guide/images/#updating-images
+##
+imagePullPolicy: IfNotPresent
+
+## Additional arguments that are passed to the MySQL container.
+## For example use --default-authentication-plugin=mysql_native_password if older clients need to
+## connect to a MySQL 8 instance.
+args: []
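+# For example (a sketch; uncomment to apply the flag mentioned above):
+# args:
+#   - "--default-authentication-plugin=mysql_native_password"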
+
+extraVolumes: |
+  # - name: extras
+  #   emptyDir: {}
+
+extraVolumeMounts: |
+  # - name: extras
+  #   mountPath: /usr/share/extras
+  #   readOnly: true
+
+extraInitContainers: |
+  # - name: do-something
+  #   image: busybox
+  #   command: ['do', 'something']
+
+# Optionally specify an array of imagePullSecrets.
+# Secrets must be manually created in the namespace.
+# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+# imagePullSecrets:
+#   - name: myRegistryKeySecretName
+
+## Node selector
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+nodeSelector: {}
+
+## Affinity
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+affinity: {}
+
+## Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+
+livenessProbe:
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 3
+
+readinessProbe:
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 1
+  successThreshold: 1
+  failureThreshold: 3
+
+## Persist data to a persistent volume
+persistence:
+  enabled: true
+  ## database data Persistent Volume Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  # storageClass: "-"
+  accessMode: ReadWriteOnce
+  size: 8Gi
+  annotations: {}
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName:
+
+## Security context
+securityContext:
+  enabled: false
+  runAsUser: 999
+  fsGroup: 999
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources:
+  requests:
+    memory: 256Mi
+    cpu: 100m
+
+# Custom mysql configuration files path
+configurationFilesPath: /etc/mysql/conf.d/
+
+# Custom mysql configuration files used to override default mysql settings
+configurationFiles: {}
+#  mysql.cnf: |-
+#    [mysqld]
+#    skip-name-resolve
+#    ssl-ca=/ssl/ca.pem
+#    ssl-cert=/ssl/server-cert.pem
+#    ssl-key=/ssl/server-key.pem
+
+# Custom mysql init SQL files used to initialize the database
+initializationFiles: {}
+#  first-db.sql: |-
+#    CREATE DATABASE IF NOT EXISTS first DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
+#  second-db.sql: |-
+#    CREATE DATABASE IF NOT EXISTS second DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
+
+metrics:
+  enabled: false
+  image: prom/mysqld-exporter
+  imageTag: v0.10.0
+  imagePullPolicy: IfNotPresent
+  resources: {}
+  annotations: {}
+    # prometheus.io/scrape: "true"
+    # prometheus.io/port: "9104"
+  livenessProbe:
+    initialDelaySeconds: 15
+    timeoutSeconds: 5
+  readinessProbe:
+    initialDelaySeconds: 5
+    timeoutSeconds: 1
+  flags: []
+  serviceMonitor:
+    enabled: false
+    additionalLabels: {}
+
+## Configure the service
+## ref: http://kubernetes.io/docs/user-guide/services/
+service:
+  annotations: {}
+  ## Specify a service type
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
+  type: ClusterIP
+  port: 3306
+  # nodePort: 32000
+  # loadBalancerIP:
+
+## Pods Service Account
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+serviceAccount:
+  ## Specifies whether a ServiceAccount should be created
+  ##
+  create: false
+  ## The name of the ServiceAccount to use.
+  ## If not set and create is true, a name is generated using the mysql.fullname template
+  # name:
+
+ssl:
+  enabled: false
+  secret: mysql-ssl-certs
+  certificates:
+#  - name: mysql-ssl-certs
+#    ca: |-
+#      -----BEGIN CERTIFICATE-----
+#      ...
+#      -----END CERTIFICATE-----
+#    cert: |-
+#      -----BEGIN CERTIFICATE-----
+#      ...
+#      -----END CERTIFICATE-----
+#    key: |-
+#      -----BEGIN RSA PRIVATE KEY-----
+#      ...
+#      -----END RSA PRIVATE KEY-----
+
+## Populates the 'TZ' system timezone environment variable
+## ref: https://dev.mysql.com/doc/refman/5.7/en/time-zone-support.html
+##
+## Default: nil (mysql will use image's default timezone, normally UTC)
+## Example: 'Australia/Sydney'
+# timezone:
+
+# Deployment Annotations
+deploymentAnnotations: {}
+
+# To be added to the database server pod(s)
+podAnnotations: {}
+podLabels: {}
+
+## Set pod priorityClassName
+# priorityClassName: {}
+
+
+## Init container resources defaults
+initContainer:
+  resources:
+    requests:
+      memory: 10Mi
+      cpu: 10m
diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/.helmignore b/ansible/roles/helm_install/files/druid/charts/postgresql/.helmignore
new file mode 100755
index 0000000..a1c17ae
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/postgresql/.helmignore
@@ -0,0 +1,2 @@
+.git
+OWNERS
\ No newline at end of file
diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/Chart.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/Chart.yaml
new file mode 100755
index 0000000..7a4910a
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/postgresql/Chart.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+appVersion: 11.7.0
+deprecated: true
+description: DEPRECATED Chart for PostgreSQL, an object-relational database management
+  system (ORDBMS) with an emphasis on extensibility and on standards-compliance.
+engine: gotpl
+home: https://www.postgresql.org/
+icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png
+keywords:
+- postgresql
+- postgres
+- database
+- sql
+- replication
+- cluster
+name: postgresql
+sources:
+- https://github.com/bitnami/bitnami-docker-postgresql
+version: 8.6.4
diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/README.md b/ansible/roles/helm_install/files/druid/charts/postgresql/README.md
new file mode 100755
index 0000000..c0e610b
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/postgresql/README.md
@@ -0,0 +1,587 @@
+# PostgreSQL
+
+[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance.
+
+For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha)
+
+## This Helm chart is deprecated
+
+Given the [`stable` deprecation timeline](https://github.com/helm/charts#deprecation-timeline), the Bitnami maintained PostgreSQL Helm chart is now located at [bitnami/charts](https://github.com/bitnami/charts/).
+
+The Bitnami repository is already included in the Hubs and we will continue providing the same cadence of updates, support, etc. that we've been providing here over the years.
Installation instructions are very similar, just adding the _bitnami_ repo and using it during the installation (`bitnami/<chart>` instead of `stable/<chart>`).
+
+```bash
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/<chart>           # Helm 3
+$ helm install --name my-release bitnami/<chart>    # Helm 2
+```
+
+To update an existing _stable_ deployment with a chart hosted in the bitnami repository you can execute:
+
+```bash
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm upgrade my-release bitnami/<chart>
+```
+
+Issues and PRs related to the chart itself will be redirected to the `bitnami/charts` GitHub repository. In the same way, we'll be happy to answer questions related to this migration process in [this issue](https://github.com/helm/charts/issues/20969), created as a common place for discussion.
+
+## TL;DR;
+
+```console
+$ helm install my-release stable/postgresql
+```
+
+## Introduction
+
+This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/).
+
+## Prerequisites
+
+- Kubernetes 1.12+
+- Helm 2.11+ or Helm 3.0-beta3+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm install my-release stable/postgresql
+```
+
+The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+The following table lists the configurable parameters of the PostgreSQL chart and their default values.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `global.imageRegistry` | Global Docker Image registry | `nil` |
+| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` |
+| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` |
+| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` |
+| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` |
+| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` |
+| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
+| `global.storageClass` | Global storage class for dynamic provisioning | `nil` |
+| `image.registry` | PostgreSQL Image registry | `docker.io` |
+| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` |
+| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` |
+| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
+| `image.debug` | Specify if debug values should be set | `false` |
+| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` |
+| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` |
+| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsGroup` values do not work) | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
+| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` |
+| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` |
+| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` |
+| `ldap.enabled` | Enable LDAP support | `false` |
+| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` |
+| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` |
+| `ldap.server` | IP address or name of the LDAP server. | `nil` |
+| `ldap.port` | Port number on the LDAP server to connect to | `nil` |
+| `ldap.scheme` | Set to `ldaps` to use LDAPS. | `nil` |
+| `ldap.tls` | Set to `1` to use TLS encryption | `nil` |
+| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` |
+| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` |
+| `ldap.search_attr` | Attribute to match against the user name in the search | `nil` |
+| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` |
+| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` |
+| `ldap.bindDN` | DN of user to bind to LDAP | `nil` |
+| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` |
+| `replication.enabled` | Enable replication | `false` |
+| `replication.user` | Replication user | `repl_user` |
+| `replication.password` | Replication user password | `repl_password` |
+| `replication.slaveReplicas` | Number of slave replicas | `1` |
+| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` |
+| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` |
+| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` |
+| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different from `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be used to authenticate on LDAP. | `nil` |
+| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) | _random 10 character alphanumeric string_ |
+| `postgresqlUsername` | PostgreSQL admin user | `postgres` |
+| `postgresqlPassword` | PostgreSQL admin password | _random 10 character alphanumeric string_ |
+| `postgresqlDatabase` | PostgreSQL database | `nil` |
+| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) |
+| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` |
+| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. | `nil` |
+| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` |
+| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` |
+| `postgresqlConfiguration` | Runtime Config Parameters | `nil` |
+| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` |
+| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` |
+| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` |
+| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` |
| `nil` |
+| `initdbScripts` | Dictionary of initdb scripts | `nil` |
+| `initdbUsername` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` |
+| `initdbPassword` | Password for the user specified in `initdbUsername` | `nil` |
+| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. | `nil` |
+| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` |
+| `service.type` | Kubernetes Service type | `ClusterIP` |
+| `service.port` | PostgreSQL port | `5432` |
+| `service.nodePort` | Kubernetes Service nodePort | `nil` |
+| `service.annotations` | Annotations for PostgreSQL service. The value is evaluated as a template. | `{}` |
+| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` |
+| `service.loadBalancerSourceRanges` | Addresses that are allowed when the service is `LoadBalancer` | `[]` |
+| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` |
+| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` |
+| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` |
+| `persistence.enabled` | Enable persistence using PVC | `true` |
+| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`. The value is evaluated as a template. | `nil` |
+| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` |
+| `persistence.subPath` | Subdirectory of the volume to mount at | `""` |
+| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` |
+| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` |
+| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` |
+| `persistence.annotations` | Annotations for the PVC | `{}` |
+| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` |
+| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` |
+| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` |
+| `master.annotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` |
+| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` |
+| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` |
+| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` |
+| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` |
+| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` |
+| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` |
+| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` |
+| `master.sidecars` | Add additional containers to the pod | `[]` |
+| `slave.nodeSelector` | Node labels for pod assignment (postgresql slave) | `{}` |
+| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` |
+| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` |
+| `slave.annotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` |
+| `slave.labels` | Map of labels to add to the 
statefulsets (postgresql slave) | `{}` |
+| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` |
+| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` |
+| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` |
+| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` |
+| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` |
+| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` |
+| `slave.sidecars` | Add additional containers to the pod | `[]` |
+| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` |
+| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` |
+| `securityContext.enabled` | Enable security context | `true` |
+| `securityContext.fsGroup` | Group ID for the container | `1001` |
+| `securityContext.runAsUser` | User ID for the container | `1001` |
+| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` |
+| `serviceAccount.name` | Name of existing service account | `nil` |
+| `livenessProbe.enabled` | Whether to enable the livenessProbe | `true` |
+| `networkPolicy.enabled` | Enable NetworkPolicy | `false` |
+| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
+| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `nil` |
+| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 |
+| `livenessProbe.periodSeconds` | How often to perform the probe | 10 |
+| `livenessProbe.timeoutSeconds` | When the probe times out | 5 |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | 6 |
+| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
+| `readinessProbe.enabled` | Whether to enable the readinessProbe | `true` |
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 |
+| `readinessProbe.periodSeconds` | How often to perform the probe | 10 |
+| `readinessProbe.timeoutSeconds` | When the probe times out | 5 |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded 
| 6 |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
+| `metrics.enabled` | Start a Prometheus exporter | `false` |
+| `metrics.service.type` | Kubernetes Service type | `ClusterIP` |
+| `service.clusterIP` | Static clusterIP or None for headless services | `nil` |
+| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` |
+| `metrics.service.loadBalancerIP` | loadBalancerIP if the metrics service type is `LoadBalancer` | `nil` |
+| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` |
+| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
+| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` |
+| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
+| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` |
+| `metrics.prometheusRule.enabled` | Set this to `true` to create prometheusRules for Prometheus operator | `false` |
+| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` |
+| `metrics.prometheusRule.namespace` | Namespace where the prometheusRules resource should be created | the same namespace as postgresql |
+| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. | `[]` |
+| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` |
+| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` |
+| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` |
+| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` |
+| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
+| `metrics.customMetrics` | Additional custom metrics | `nil` |
+| `metrics.securityContext.enabled` | Enable security context for metrics | `false` |
+| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` |
+| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 |
+| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 |
+| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 |
+| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | 6 |
+| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
+| `metrics.readinessProbe.enabled` | Whether to enable the readinessProbe | `true` |
+| `metrics.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 |
+| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 |
+| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 |
+| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded 
| 6 |
+| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
+| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install my-release \
+  --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \
+    stable/postgresql
+```
+
+The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install my-release -f values.yaml stable/postgresql
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist.
+
+### Production configuration and horizontal scaling
+
+This chart includes a `values-production.yaml` file with parameters oriented to production configuration, in comparison to the regular `values.yaml`. You can use this file instead of the default one.
+
+- Enable replication:
+```diff
+- replication.enabled: false
++ replication.enabled: true
+```
+
+- Number of slave replicas:
+```diff
+- replication.slaveReplicas: 1
++ replication.slaveReplicas: 2
+```
+
+- Set synchronous commit mode:
+```diff
+- replication.synchronousCommit: "off"
++ replication.synchronousCommit: "on"
+```
+
+- Number of replicas that will have synchronous replication:
+```diff
+- replication.numSynchronousReplicas: 0
++ replication.numSynchronousReplicas: 1
+```
+
+- Start a Prometheus exporter:
+```diff
+- metrics.enabled: false
++ metrics.enabled: true
+```
+
+To horizontally scale this chart, increase the number of slave nodes through the `replication.slaveReplicas` parameter. You can also use the `values-production.yaml` file or modify the parameters shown above.
+
+### Change PostgreSQL version
+
+To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=12.0.0`
+
+### postgresql.conf / pg_hba.conf files as configMap
+
+This Helm chart also supports customizing the whole configuration file.
+
+Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as a configMap to the containers and it will be used for configuring the PostgreSQL server.
+
+Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"} (see the sketch at the end of this section).
+
+In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options.
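+
+For instance, a minimal `values.yaml` fragment could look like the following (a sketch; the two settings shown are illustrative PostgreSQL parameters). The camelCase keys are rendered as snake_case entries in the generated `postgresql.conf`:
+
+```yaml
+postgresqlConfiguration:
+  sharedBuffers: "500MB"    # rendered as shared_buffers=500MB
+  maxConnections: "200"     # rendered as max_connections=200
+```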
+
+### Allow settings to be loaded from files other than the default `postgresql.conf`
+
+If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory.
+Those files will be mounted as a configMap to the containers, adding to/overwriting the default configuration via the `include_dir` directive, which allows settings to be loaded from files other than the default `postgresql.conf`.
+
+Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option.
+
+### Initialize a fresh instance
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap.
+
+Alternatively, you can specify custom scripts using the `initdbScripts` parameter as a dict.
+
+In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
+
+### Sidecars
+
+If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+# For the PostgreSQL master
+master:
+  sidecars:
+    - name: your-image-name
+      image: your-image
+      imagePullPolicy: Always
+      ports:
+        - name: portname
+          containerPort: 1234
+# For the PostgreSQL replicas
+slave:
+  sidecars:
+    - name: your-image-name
+      image: your-image
+      imagePullPolicy: Always
+      ports:
+        - name: portname
+          containerPort: 1234
+```
+
+### Metrics
+
+The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed, and it is expected that the metrics are collected from inside the k8s cluster using something similar to the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
+
+The exporter allows you to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details.
+
+### Use of global variables
+
+In more complex scenarios, we may have the following tree of dependencies:
+
+```
+                  +--------------+
+                  |              |
+     +------------+   Chart 1    +-----------+
+     |            |              |           |
+     |            +------+-------+           |
+     |                   |                   |
+     |                   |                   |
+     |                   |                   |
+     v                   v                   v
++----+---------+  +------+--------+  +-------+-------+
+|              |  |               |  |               |
+|  PostgreSQL  |  |  Sub-chart 1  |  |  Sub-chart 2  |
+|              |  |               |  |               |
++--------------+  +---------------+  +---------------+
+```
+
+Here the parent chart (Chart 1) depends on the three charts at the bottom: PostgreSQL, Sub-chart 1 and Sub-chart 2. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. 
In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option could be to deploy Chart 1 with the following parameters:
+
+```
+postgresql.postgresqlPassword=testtest
+subchart1.postgresql.postgresqlPassword=testtest
+subchart2.postgresql.postgresqlPassword=testtest
+postgresql.postgresqlDatabase=db1
+subchart1.postgresql.postgresqlDatabase=db1
+subchart2.postgresql.postgresqlDatabase=db1
+```
+
+If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows:
+
+```
+global.postgresql.postgresqlPassword=testtest
+global.postgresql.postgresqlDatabase=db1
+```
+
+This way, the credentials will be available in all of the subcharts.
+
+## Persistence
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If the volume already contains data, syncing to the standby nodes will fail for all commits; see [this code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556) for details. If you need to use that data, convert it to a SQL dump and import it after `helm install` has finished.
+
+## NetworkPolicy
+
+To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+```console
+$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+```
+
+With NetworkPolicy enabled, traffic will be limited to just port 5432.
+
+For a more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
+This label will be displayed in the output of a successful install.
+
+## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image
+
+- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, it will be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image.
+- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. 
+
+- For OpenShift, one may either define `runAsUser` and `fsGroup` accordingly, or try this more dynamic option: `volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false`
+
+### Deploy chart using Docker Official PostgreSQL Image
+
+From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image.
+Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. Basically, the PostgreSQL data dir cannot be the mount point directly; it has to be a subdirectory.
+
+```
+helm install postgres \
+    --set image.repository=postgres \
+    --set image.tag=10.6 \
+    --set postgresqlDataDir=/data/pgdata \
+    --set persistence.mountPath=/data/ \
+    stable/postgresql
+```
+
+## Upgrade
+
+It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart:
+
+```bash
+$ helm upgrade my-release stable/postgresql \
+    --set postgresqlPassword=[POSTGRESQL_PASSWORD] \
+    --set replication.password=[REPLICATION_PASSWORD]
+```
+
+> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes.
+
+## 8.0.0
+
+Prefixes the port names with their protocols to comply with Istio conventions.
+
+If you depend on the port names in your setup, make sure to update them to reflect this change.
+
+## 7.1.0
+
+Adds support for LDAP configuration.
+
+## 7.0.0
+
+Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly, which has since been fixed to match the spec.
+
+In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in line with the deprecation of the older API versions, resulting in compatibility breakage.
+
+This major version bump signifies this change.
+
+## 6.5.7
+
+In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with PostgreSQL versions 10, 11 and 12 is Postgis 2.5. It has been compiled with the following dependencies:
+
+ - protobuf
+ - protobuf-c
+ - json-c
+ - geos
+ - proj
+
+## 5.0.0
+
+In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main differences and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). 
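+
+If you are not ready to migrate your data yet, you can stay on the previous major version by pinning the container through the `image.tag` parameter described above (a sketch; the `10.6.0` tag is illustrative, substitute any valid PostgreSQL 10 tag from the Bitnami repository):
+
+```console
+$ helm upgrade my-release stable/postgresql \
+    --set image.tag=10.6.0 \
+    --set postgresqlPassword=[POSTGRESQL_PASSWORD]
+```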
+
+For major releases of PostgreSQL, the internal data storage format is subject to change, which complicates upgrades. You may see errors like the following one in the logs:
+
+```bash
+Welcome to the Bitnami postgresql container
+Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql
+Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues
+Send us your feedback at containers@bitnami.com
+
+INFO ==> ** Starting PostgreSQL setup **
+INFO ==> Validating settings in POSTGRESQL_* env vars..
+INFO ==> Initializing PostgreSQL database...
+INFO ==> postgresql.conf file not detected. Generating it...
+INFO ==> pg_hba.conf file not detected. Generating it...
+INFO ==> Deploying PostgreSQL with persisted data...
+INFO ==> Configuring replication parameters
+INFO ==> Loading custom scripts...
+INFO ==> Enabling remote connections
+INFO ==> Stopping PostgreSQL...
+INFO ==> ** PostgreSQL setup finished! **
+
+INFO ==> ** Starting PostgreSQL **
+ [1] FATAL: database files are incompatible with server
+ [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3.
+```
+In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. Basically, create a database dump in the old chart, move and restore it in the new one.
+
+### 4.0.0
+
+By default, this chart uses the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately.
+
+IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error:
+
+```
+The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development
+```
+
+### 3.0.0
+
+This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods.
+It also fixes an issue with the `postgresql.master.fullname` helper template not obeying fullnameOverride.
+
+#### Breaking changes
+
+- `affinity` has been renamed to `master.affinity` and `slave.affinity`.
+- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`.
+- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`.
+
+### 2.0.0
+
+In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the below steps:
+
+ - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. 
You can find the instructions to obtain the password in the NOTES.txt; the service name can be obtained by running
+
+ ```console
+$ kubectl get svc
+ ```
+
+- Install (not upgrade) the new version
+
+```console
+$ helm repo update
+$ helm install my-release stable/postgresql
+```
+
+- Connect to the new pod (you can obtain the name by running `kubectl get pods`):
+
+```console
+$ kubectl exec -it NAME bash
+```
+
+- Once logged in, create a dump file from the previous database using `pg_dump`. For that, connect to the previous postgresql chart:
+
+```console
+$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql
+```
+
+After running the above command you will be prompted for a password; this is the previous chart password (`OLD_PASSWORD`).
+This operation could take some time depending on the database size.
+
+- Once you have the backup file, you can restore it with a command like the one below:
+
+```console
+$ psql -U postgres DATABASE_NAME < /tmp/backup.sql
+```
+
+In this case, you are accessing the local postgresql, so the password should be the new one (you can find it in NOTES.txt).
+
+If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below.
+
+```console
+$ psql -U postgres
+postgres=# drop database DATABASE_NAME;
+postgres=# create database DATABASE_NAME;
+postgres=# create user USER_NAME;
+postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD';
+postgres=# grant all privileges on database DATABASE_NAME to USER_NAME;
+postgres=# alter database DATABASE_NAME owner to USER_NAME;
+```
diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/ci/default-values.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/ci/default-values.yaml
new file mode 100755
index 0000000..fc2ba60
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/postgresql/ci/default-values.yaml
@@ -0,0 +1 @@
+# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml.
diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/ci/shmvolume-disabled-values.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/ci/shmvolume-disabled-values.yaml
new file mode 100755
index 0000000..347d3b4
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/postgresql/ci/shmvolume-disabled-values.yaml
@@ -0,0 +1,2 @@
+shmVolume:
+  enabled: false
diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/files/README.md b/ansible/roles/helm_install/files/druid/charts/postgresql/files/README.md
new file mode 100755
index 0000000..1813a2f
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/postgresql/files/README.md
@@ -0,0 +1 @@
+Copy here your postgresql.conf and/or pg_hba.conf files to use them as a config map.
diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/files/conf.d/README.md b/ansible/roles/helm_install/files/druid/charts/postgresql/files/conf.d/README.md
new file mode 100755
index 0000000..184c187
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/postgresql/files/conf.d/README.md
@@ -0,0 +1,4 @@
+If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. 
+These files will be injected as a config map and will add to/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`.
+
+More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file).
diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/ansible/roles/helm_install/files/druid/charts/postgresql/files/docker-entrypoint-initdb.d/README.md
new file mode 100755
index 0000000..cba3809
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/postgresql/files/docker-entrypoint-initdb.d/README.md
@@ -0,0 +1,3 @@
+You can copy here your custom `.sh`, `.sql` or `.sql.gz` files so they are executed during the first boot of the image.
+
+More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository.
\ No newline at end of file
diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/NOTES.txt b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/NOTES.txt
new file mode 100755
index 0000000..64d7353
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/NOTES.txt
@@ -0,0 +1,81 @@
+This Helm chart is deprecated
+
+Given the `stable` deprecation timeline (https://github.com/helm/charts#deprecation-timeline), the Bitnami maintained Helm chart is now located at bitnami/charts (https://github.com/bitnami/charts/).
+
+The Bitnami repository is already included in the Hubs and we will continue providing the same cadence of updates, support, etc. that we've been keeping here these years. Installation instructions are very similar, just adding the _bitnami_ repo and using it during the installation (`bitnami/<chart>` instead of `stable/<chart>`)
+
+```bash
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/<chart>           # Helm 3
+$ helm install --name my-release bitnami/<chart>    # Helm 2
+```
+
+To update an existing _stable_ deployment with a chart hosted in the bitnami repository you can execute
+
+```bash
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm upgrade my-release bitnami/<chart>
+```
+
+Issues and PRs related to the chart itself will be redirected to the `bitnami/charts` GitHub repository. In the same way, we'll be happy to answer questions related to this migration process in this issue (https://github.com/helm/charts/issues/20969) created as a common place for discussion.
+
+** Please be patient while the chart is being deployed **
+
+PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster:
+
+    {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection
+{{- if .Values.replication.enabled }}
+    {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection
+{{- end }}
+
+{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }}
+
+To get the password for "postgres" run:
+
+    export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode)
+{{- end }}
+
+To get the password for "{{ template "postgresql.username" . 
}}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. +{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/_helpers.tpl b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/_helpers.tpl new file mode 100755 index 0000000..3ee5572 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,420 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} +{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" .Values.global.postgresql.existingSecret -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" .Values.existingSecret -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if .Values.global.postgresql.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) 
-}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. 
+*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/configmap.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/configmap.yaml new file mode 100755 index 0000000..d2178c0 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/extended-config-configmap.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100755 index 0000000..8a41195 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . 
}} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/initialization-configmap.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/initialization-configmap.yaml new file mode 100755 index 0000000..8eb5e05 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . }} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/metrics-configmap.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/metrics-configmap.yaml new file mode 100755 index 0000000..524aa2f --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/metrics-svc.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/metrics-svc.yaml new file mode 100755 index 0000000..c610f09 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,26 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + annotations: +{{ toYaml .Values.metrics.service.annotations | indent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + app: {{ template "postgresql.name" . 
}} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/networkpolicy.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/networkpolicy.yaml new file mode 100755 index 0000000..ea1fc9b --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,38 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . }}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/prometheusrule.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/prometheusrule.yaml new file mode 100755 index 0000000..44f1242 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/secrets.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/secrets.yaml new file mode 100755 index 0000000..094d18b --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . 
| b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/serviceaccount.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/serviceaccount.yaml new file mode 100755 index 0000000..27e5b51 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + name: {{ template "postgresql.fullname" . }} +{{- end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/servicemonitor.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/servicemonitor.yaml new file mode 100755 index 0000000..f3a529a --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/statefulset-slaves.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100755 index 0000000..3290ff7 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,299 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- with .Values.slave.annotations }} + annotations: +{{ toYaml . 
| indent 4 }} +{{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{ if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data + chmod 700 {{ .Values.persistence.mountPath }}/data + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . 
| indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.slave.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/statefulset.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/statefulset.yaml new file mode 100755 index 0000000..3390be2 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,458 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . 
}} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.master.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- with .Values.master.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + role: master +{{- with .Values.master.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.master.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: +{{ toYaml .Values.master.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: +{{ toYaml .Values.master.affinity | indent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: +{{ toYaml .Values.master.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . 
}}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          {{- if .Values.resources }}
+          resources: {{- toYaml .Values.resources | nindent 12 }}
+          {{- end }}
+          command:
+            - /bin/sh
+            - -cx
+            - |
+              {{ if .Values.persistence.enabled }}
+              mkdir -p {{ .Values.persistence.mountPath }}/data
+              chmod 700 {{ .Values.persistence.mountPath }}/data
+              find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \
+              {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }}
+                xargs chown -R `id -u`:`id -G | cut -d " " -f2`
+              {{- else }}
+                xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}
+              {{- end }}
+              {{- end }}
+              {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }}
+              chmod -R 777 /dev/shm
+              {{- end }}
+          {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }}
+          securityContext:
+          {{- else }}
+          securityContext:
+            runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }}
+          {{- end }}
+          volumeMounts:
+            {{ if .Values.persistence.enabled }}
+            - name: data
+              mountPath: {{ .Values.persistence.mountPath }}
+              subPath: {{ .Values.persistence.subPath }}
+            {{- end }}
+            {{- if .Values.shmVolume.enabled }}
+            - name: dshm
+              mountPath: /dev/shm
+            {{- end }}
+        {{- end }}
+        {{- if .Values.master.extraInitContainers }}
+{{ tpl .Values.master.extraInitContainers . | indent 8 }}
+        {{- end }}
+      {{- end }}
+      {{- if .Values.master.priorityClassName }}
+      priorityClassName: {{ .Values.master.priorityClassName }}
+      {{- end }}
+      containers:
+        - name: {{ template "postgresql.fullname" . }}
+          image: {{ template "postgresql.image" . }}
+          imagePullPolicy: "{{ .Values.image.pullPolicy }}"
+          {{- if .Values.resources }}
+          resources: {{- toYaml .Values.resources | nindent 12 }}
+          {{- end }}
+          {{- if .Values.securityContext.enabled }}
+          securityContext:
+            runAsUser: {{ .Values.securityContext.runAsUser }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" .Values.image.debug | quote }}
+            - name: POSTGRESQL_PORT_NUMBER
+              value: "{{ template "postgresql.port" . }}"
+            - name: POSTGRESQL_VOLUME_DIR
+              value: "{{ .Values.persistence.mountPath }}"
+            {{- if .Values.postgresqlInitdbArgs }}
+            - name: POSTGRES_INITDB_ARGS
+              value: {{ .Values.postgresqlInitdbArgs | quote }}
+            {{- end }}
+            {{- if .Values.postgresqlInitdbWalDir }}
+            - name: POSTGRES_INITDB_WALDIR
+              value: {{ .Values.postgresqlInitdbWalDir | quote }}
+            {{- end }}
+            {{- if .Values.initdbUser }}
+            - name: POSTGRESQL_INITSCRIPTS_USERNAME
+              value: {{ .Values.initdbUser }}
+            {{- end }}
+            {{- if .Values.initdbPassword }}
+            - name: POSTGRESQL_INITSCRIPTS_PASSWORD
+              value: {{ .Values.initdbPassword | quote }}
+            {{- end }}
+            {{- if .Values.persistence.mountPath }}
+            - name: PGDATA
+              value: {{ .Values.postgresqlDataDir | quote }}
+            {{- end }}
+            {{- if .Values.replication.enabled }}
+            - name: POSTGRES_REPLICATION_MODE
+              value: "master"
+            - name: POSTGRES_REPLICATION_USER
+              value: {{ include "postgresql.replication.username" . | quote }}
+            {{- if .Values.usePasswordFile }}
+            - name: POSTGRES_REPLICATION_PASSWORD_FILE
+              value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password"
+            {{- else }}
+            - name: POSTGRES_REPLICATION_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ template "postgresql.secretName" .
}} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) | quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end}} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote}} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ .Values.extraEnvVarsCM }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.port" .)) $database | quote }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . 
}} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/svc-headless.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/svc-headless.yaml new file mode 100755 index 0000000..5c71f46 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/svc-read.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/svc-read.yaml new file mode 100755 index 0000000..d9492e2 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,31 @@ +{{- if .Values.replication.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/templates/svc.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/svc.yaml new file mode 100755 index 0000000..0baea4a --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/postgresql/templates/svc.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.service.annotations }} + annotations: +{{ tpl (toYaml .) $ | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ with .Values.service.loadBalancerSourceRanges }} +{{ toYaml . 
| indent 4 }}
+{{- end }}
+  {{- end }}
+  {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }}
+  clusterIP: {{ .Values.service.clusterIP }}
+  {{- end }}
+  ports:
+    - name: tcp-postgresql
+      port: {{ template "postgresql.port" . }}
+      targetPort: tcp-postgresql
+      {{- if .Values.service.nodePort }}
+      nodePort: {{ .Values.service.nodePort }}
+      {{- end }}
+  selector:
+    app: {{ template "postgresql.name" . }}
+    release: {{ .Release.Name | quote }}
+    role: master
diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/values-production.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/values-production.yaml
new file mode 100755
index 0000000..8da0b3d
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/postgresql/values-production.yaml
@@ -0,0 +1,520 @@
+## Global Docker image parameters
+## Please note that this will override the image parameters, including dependencies, configured to use the global value
+## Currently available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+global:
+  postgresql: {}
+#   imageRegistry: myRegistryName
+#   imagePullSecrets:
+#     - myRegistryKeySecretName
+#   storageClass: myStorageClass
+
+## Bitnami PostgreSQL image version
+## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/postgresql
+  tag: 11.7.0-debian-10-r9
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+  ## Set to true if you would like to see extra information on logs
+  ## It turns on BASH and NAMI debugging in minideb
+  ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+  debug: false
+
+## String to partially override postgresql.fullname template (will maintain the release name)
+##
+# nameOverride:
+
+## String to fully override postgresql.fullname template
+##
+# fullnameOverride:
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    ## Specify an imagePullPolicy
+    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+    ##
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  ## Init container Security Context
+  ## Note: the chown of the data folder is done to securityContext.runAsUser
+  ## and not the below volumePermissions.securityContext.runAsUser
+  ## When runAsUser is set to special value "auto", init container will try to chown the
+  ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
+  ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed).
+  ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with
+  ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false
+  ##
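+  ## For illustration only (an editor's sketch, not upstream documentation):
+  ## a values fragment applying the "auto" approach described above:
+  # volumePermissions:
+  #   enabled: true
+  #   securityContext:
+  #     runAsUser: "auto"
+  # securityContext:
+  #   enabled: false
+  # shmVolume:
+  #   chmod:
+  #     enabled: false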
+  securityContext:
+    runAsUser: 0
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName:
+
+## Pod Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+##
+securityContext:
+  enabled: true
+  fsGroup: 1001
+  runAsUser: 1001
+
+## Pod Service Account
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+serviceAccount:
+  enabled: false
+  ## Name of an already existing service account. Setting this value disables the automatic service account creation.
+  # name:
+
+replication:
+  enabled: true
+  user: repl_user
+  password: repl_password
+  slaveReplicas: 2
+  ## Set synchronous commit mode: on, off, remote_apply, remote_write and local
+  ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL
+  synchronousCommit: "on"
+  ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication
+  ## NOTE: It cannot be > slaveReplicas
+  numSynchronousReplicas: 1
+  ## Replication Cluster application name. Useful for defining multiple replication policies
+  applicationName: my_application
+
+## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`)
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!)
+# postgresqlPostgresPassword:
+
+## PostgreSQL user (has superuser privileges if username is `postgres`)
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
+postgresqlUsername: postgres
+
+## PostgreSQL password
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
+##
+# postgresqlPassword:
+
+## PostgreSQL password using existing secret
+## existingSecret: secret
+
+## Mount PostgreSQL secret as a file instead of passing environment variable
+# usePasswordFile: false
+
+## Create a database
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run
+##
+# postgresqlDatabase:
+
+## PostgreSQL data dir
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
+##
+postgresqlDataDir: /bitnami/postgresql/data
+
+## An array to add extra environment variables
+## For example:
+## extraEnv:
+##   - name: FOO
+##     value: "bar"
+##
+# extraEnv:
+extraEnv: []
+
+## Name of a ConfigMap containing extra env vars
+##
+# extraEnvVarsCM:
+
+## Specify extra initdb args
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
+##
+# postgresqlInitdbArgs:
+
+## Specify a custom location for the PostgreSQL transaction log
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
+##
+# postgresqlInitdbWalDir:
+
+## PostgreSQL configuration
+## Specify runtime configuration parameters as a dict, using camelCase, e.g.
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. 
+
+## Start master and slave(s) pod(s) without limitations on shm memory.
+## By default docker and containerd (and possibly other container runtimes)
+## limit `/dev/shm` to `64M` (see e.g. the
+## [docker issue](https://github.com/docker-library/postgres/issues/416) and the
+## [containerd issue](https://github.com/containerd/containerd/issues/3654)),
+## which may not be enough if PostgreSQL uses parallel workers heavily.
+##
+shmVolume:
+  ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove
+  ## this limitation.
+  ##
+  enabled: true
+  ## Set to `true` to `chmod 777 /dev/shm` on an initContainer.
+  ## This option is ignored if `volumePermissions.enabled` is `false`
+  ##
+  chmod:
+    enabled: true
+
+## PostgreSQL data Persistent Volume Storage Class
+## If defined, storageClassName: <storageClass>
+## If set to "-", storageClassName: "", which disables dynamic provisioning
+## If undefined (the default) or set to null, no storageClassName spec is
+##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+##   GKE, AWS & OpenStack)
+##
+persistence:
+  enabled: true
+  ## A manually managed Persistent Volume and Claim
+  ## If defined, PVC must be created manually before volume will be bound
+  ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart
+  ##
+  # existingClaim:
+
+  ## The path the volume will be mounted at, useful when using different
+  ## PostgreSQL images.
+  ##
+  mountPath: /bitnami/postgresql
+
+  ## The subdirectory of the volume to mount to, useful in dev environments
+  ## and one PV for multiple services.
+  ##
+  subPath: ""
+
+  # storageClass: "-"
+  accessModes:
+    - ReadWriteOnce
+  size: 8Gi
+  annotations: {}
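+
+## For illustration only (an editor's sketch; the claim name is hypothetical):
+## reusing a PVC created out of band instead of letting the chart request one
+## through its volumeClaimTemplate:
+# persistence:
+#   enabled: true
+#   existingClaim: druid-metadata-postgresql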
+
+## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+updateStrategy:
+  type: RollingUpdate
+
+##
+## PostgreSQL Master parameters
+##
+master:
+  ## Node, affinity, tolerations, and priorityclass settings for pod assignment
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
+  nodeSelector: {}
+  affinity: {}
+  tolerations: []
+  labels: {}
+  annotations: {}
+  podLabels: {}
+  podAnnotations: {}
+  priorityClassName: ""
+  ## Additional PostgreSQL Master Volume mounts
+  ##
+  extraVolumeMounts: []
+  ## Additional PostgreSQL Master Volumes
+  ##
+  extraVolumes: []
+  ## Add sidecars to the pod
+  ##
+  ## For example:
+  ## sidecars:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  sidecars: []
+
+##
+## PostgreSQL Slave parameters
+##
+slave:
+  ## Node, affinity, tolerations, and priorityclass settings for pod assignment
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
+  nodeSelector: {}
+  affinity: {}
+  tolerations: []
+  labels: {}
+  annotations: {}
+  podLabels: {}
+  podAnnotations: {}
+  priorityClassName: ""
+  extraInitContainers: |
+    # - name: do-something
+    #   image: busybox
+    #   command: ['do', 'something']
+  ## Additional PostgreSQL Slave Volume mounts
+  ##
+  extraVolumeMounts: []
+  ## Additional PostgreSQL Slave Volumes
+  ##
+  extraVolumes: []
+  ## Add sidecars to the pod
+  ##
+  ## For example:
+  ## sidecars:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  sidecars: []
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources:
+  requests:
+    memory: 256Mi
+    cpu: 250m
+
+networkPolicy:
+  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
+  ##
+  enabled: false
+
+  ## The Policy model to apply. When set to false, only pods with the correct
+  ## client label will have network access to the port PostgreSQL is listening
+  ## on. When true, PostgreSQL will accept connections from any source
+  ## (with the correct destination port).
+  ##
+  allowExternal: true
+
+  ## If explicitNamespacesSelector is missing or set to {}, only client Pods in the
+  ## NetworkPolicy's own namespace that carry the required client label can reach the DB.
+  ## But sometimes the DB should also be accessible to clients in other namespaces; in that
+  ## case, this LabelSelector selects those namespaces. Note that the NetworkPolicy's own
+  ## namespace should also be explicitly added.
+  ##
+  # explicitNamespacesSelector:
+  #   matchLabels:
+  #     role: frontend
+  #   matchExpressions:
+  #     - {key: role, operator: In, values: [frontend]}
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+## Configure metrics exporter
+##
+metrics:
+  enabled: true
+  # resources: {}
+  service:
+    type: ClusterIP
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "9187"
+    loadBalancerIP:
+  serviceMonitor:
+    enabled: false
+    additionalLabels: {}
+    # namespace: monitoring
+    # interval: 30s
+    # scrapeTimeout: 10s
+  ## Custom PrometheusRule to be defined
+  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+  prometheusRule:
+    enabled: false
+    additionalLabels: {}
+    namespace: ""
+    rules: []
+    ## These are just example rules; please adapt them to your needs.
+    ## Make sure to constrain the rules to the current postgresql service.
+    # - alert: HugeReplicationLag
+    #   expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1
+    #   for: 1m
+    #   labels:
+    #     severity: critical
+    #   annotations:
+    #     description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
+    #     summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
+  image:
+    registry: docker.io
+    repository: bitnami/postgres-exporter
+    tag: 0.8.0-debian-10-r28
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  ## Define additional custom metrics
+  ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file
+  # customMetrics:
+  #   pg_database:
+  #     query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
+  #     metrics:
+  #       - name:
+  #           usage: "LABEL"
+  #           description: "Name of the database"
+  #       - size_bytes:
+  #           usage: "GAUGE"
+  #           description: "Size of the database in bytes"
+  ## Pod Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: false
+    runAsUser: 1001
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+  ## Configure extra options for liveness and readiness probes
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 5
+    periodSeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 6
+    successThreshold: 1
+
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 5
+    periodSeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 6
+    successThreshold: 1
diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/values.schema.json b/ansible/roles/helm_install/files/druid/charts/postgresql/values.schema.json
new file mode 100755
index 0000000..ac2de6e
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/postgresql/values.schema.json
@@ -0,0 +1,103 @@
+{
+  "$schema": "http://json-schema.org/schema#",
+  "type": "object",
+  "properties": {
+    "postgresqlUsername": {
+      "type": "string",
+      "title": "Admin user",
+      "form": true
+    },
+    "postgresqlPassword": {
+      "type": "string",
+      "title": "Password",
+      "form": true
+    },
+    "persistence": {
+      "type": "object",
+      "properties": {
+        "size": {
+          "type": "string",
+          "title": "Persistent Volume Size",
+          "form": true,
+          "render": "slider",
+          "sliderMin": 1,
+          "sliderMax": 100,
+          "sliderUnit": "Gi"
+        }
+      }
+    },
+    "resources": {
+      "type": "object",
+      "title": "Required Resources",
+      "description": "Configure resource requests",
+      "form": true,
+      "properties": {
+        "requests": {
+          "type": "object",
+          "properties": {
+            "memory": {
+              "type": "string",
+              "form": true,
+              "render": "slider",
+              "title": "Memory Request",
+              "sliderMin": 10,
+              "sliderMax": 2048,
+              "sliderUnit": "Mi"
+            },
+            "cpu": {
+              "type": "string",
+              "form": true,
+              "render": "slider",
+              "title": "CPU Request",
+              "sliderMin": 10,
+              "sliderMax": 2000,
+              "sliderUnit": "m"
+            }
+          }
+        }
+      }
+    },
+    "replication": {
+      "type": "object",
+      "form": true,
+      "title": "Replication Details",
+      "properties": {
+        "enabled": {
+          "type": "boolean",
+          "title": "Enable Replication",
+          "form": true
+        },
+        "slaveReplicas": {
+          "type": "integer",
+          "title": "Slave Replicas",
+          "form": true,
+          "hidden": {
+            "condition": false,
+            "value": "replication.enabled"
+          }
+        }
+      }
+    },
+    "volumePermissions": {
+      "type": "object",
+      "properties": {
+        "enabled": {
+          "type": "boolean",
+          "form": true,
+          "title": "Enable Init Containers",
+          "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup"
+        }
+      }
+    },
+    "metrics": {
+      "type": "object",
+      "properties": {
+        "enabled": {
+          "type": "boolean",
+          "title": "Configure metrics exporter",
+          "form": true
+        }
+      }
+    }
+  }
+}
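## For illustration only (an editor's sketch): since this chart is vendored as
## a dependency of the Druid chart, its values are typically set from the
## parent chart's override-values.yaml under a `postgresql:` key (matching the
## dependency name). The credentials and size below are placeholders, not
## recommendations; prefer existingSecret for real deployments:
# postgresql:
#   postgresqlUsername: druid
#   postgresqlPassword: changeme
#   postgresqlDatabase: druid
#   persistence:
#     size: 8Gi
#   metrics:
#     enabled: true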
diff --git a/ansible/roles/helm_install/files/druid/charts/postgresql/values.yaml b/ansible/roles/helm_install/files/druid/charts/postgresql/values.yaml
new file mode 100755
index 0000000..d336ea0
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/charts/postgresql/values.yaml
@@ -0,0 +1,526 @@
+## Global Docker image parameters
+## Please note that this will override the image parameters, including dependencies, configured to use the global value
+## Currently available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+global:
+  postgresql: {}
+#   imageRegistry: myRegistryName
+#   imagePullSecrets:
+#     - myRegistryKeySecretName
+#   storageClass: myStorageClass
+
+## Bitnami PostgreSQL image version
+## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/postgresql
+  tag: 11.7.0-debian-10-r9
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+  ## Set to true if you would like to see extra information on logs
+  ## It turns on BASH and NAMI debugging in minideb
+  ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+  debug: false
+
+## String to partially override postgresql.fullname template (will maintain the release name)
+##
+# nameOverride:
+
+## String to fully override postgresql.fullname template
+##
+# fullnameOverride:
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    ## Specify an imagePullPolicy
+    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+    ##
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  ## Init container Security Context
+  ## Note: the chown of the data folder is done to securityContext.runAsUser
+  ## and not the below volumePermissions.securityContext.runAsUser
+  ## When runAsUser is set to special value "auto", init container will try to chown the
+  ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
+  ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed).
+  ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with
+  ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false
+  ##
+  securityContext:
+    runAsUser: 0
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. 
the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which may not be enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on an initContainer. + ## This option is ignored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: <storageClass> +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers:
| + # - name: do-something + # image: busybox + # command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## If explicitNamespacesSelector is missing or set to {}, only client Pods in the networkPolicy's namespace + ## that match the other criteria (i.e. that carry the required label) can reach the DB. + ## Sometimes, however, the DB should be accessible to clients from other namespaces; in that case, use this + ## LabelSelector to select those namespaces (note that the networkPolicy's namespace should also be explicitly added). + ## + # explicitNamespacesSelector: + # matchLabels: + # role: frontend + # matchExpressions: + # - {key: role, operator: In, values: [frontend]} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just example rules, please adapt them to your needs. + ## Make sure to constrain the rules to the current postgresql service. + # - alert: HugeReplicationLag + # expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + # summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r28 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/.helmignore b/ansible/roles/helm_install/files/druid/charts/zookeeper/.helmignore new file mode 100755 index 0000000..f0c1319 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/Chart.yaml b/ansible/roles/helm_install/files/druid/charts/zookeeper/Chart.yaml new file mode 100755 index 0000000..dcb59e8 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +appVersion: 3.5.5 +description: Centralized service for maintaining configuration information, naming, + providing distributed synchronization, and providing group services.
+home: https://zookeeper.apache.org/ +icon: https://zookeeper.apache.org/images/zookeeper_small.gif +kubeVersion: ^1.10.0-0 +maintainers: +- email: lachlan.evenson@microsoft.com + name: lachie83 +- email: owensk@google.com + name: kow3ns +name: zookeeper +sources: +- https://github.com/apache/zookeeper +- https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper +version: 2.1.4 diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/OWNERS b/ansible/roles/helm_install/files/druid/charts/zookeeper/OWNERS new file mode 100755 index 0000000..dd9facd --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/OWNERS @@ -0,0 +1,6 @@ +approvers: +- lachie83 +- kow3ns +reviewers: +- lachie83 +- kow3ns diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/README.md b/ansible/roles/helm_install/files/druid/charts/zookeeper/README.md new file mode 100755 index 0000000..c0f060e --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/README.md @@ -0,0 +1,145 @@ +# incubator/zookeeper + +This helm chart provides an implementation of the ZooKeeper [StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/) found in Kubernetes Contrib [Zookeeper StatefulSet](https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper). + +## Prerequisites +* Kubernetes 1.10+ +* PersistentVolume support on the underlying infrastructure +* A dynamic provisioner for the PersistentVolumes +* A familiarity with [Apache ZooKeeper 3.5.x](https://zookeeper.apache.org/doc/r3.5.5/) + +## Chart Components +This chart will do the following: + +* Create a fixed size ZooKeeper ensemble using a [StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/). +* Create a [PodDisruptionBudget](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-disruption-budget/) so kubectl drain will respect the Quorum size of the ensemble. +* Create a [Headless Service](https://kubernetes.io/docs/concepts/services-networking/service/) to control the domain of the ZooKeeper ensemble. +* Create a Service configured to connect to the available ZooKeeper instance on the configured client port. +* Optionally apply a [Pod Anti-Affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) to spread the ZooKeeper ensemble across nodes. +* Optionally start JMX Exporter and Zookeeper Exporter containers inside Zookeeper pods. +* Optionally create a job which creates Zookeeper chroots (e.g. `/kafka1`). +* Optionally create a Prometheus ServiceMonitor for each enabled exporter container + +## Installing the Chart +You can install the chart with the release name `zookeeper` as below. + +```console +$ helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator +$ helm install --name zookeeper incubator/zookeeper +``` + +If you do not specify a name, helm will select a name for you. + +### Installed Components +You can use `kubectl get` to view all of the installed components. 
+ +```console +$ kubectl get all -l app=zookeeper +NAME: zookeeper +LAST DEPLOYED: Wed Apr 11 17:09:48 2018 +NAMESPACE: default +STATUS: DEPLOYED + +RESOURCES: +==> v1beta1/PodDisruptionBudget +NAME MIN AVAILABLE MAX UNAVAILABLE ALLOWED DISRUPTIONS AGE +zookeeper N/A 1 1 2m + +==> v1/Service +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +zookeeper-headless ClusterIP None <none> 2181/TCP,3888/TCP,2888/TCP 2m +zookeeper ClusterIP 10.98.179.165 <none> 2181/TCP 2m + +==> v1beta1/StatefulSet +NAME DESIRED CURRENT AGE +zookeeper 3 3 2m + +==> monitoring.coreos.com/v1/ServiceMonitor +NAME AGE +zookeeper 2m +zookeeper-exporter 2m +``` + +1. `statefulsets/zookeeper` is the StatefulSet created by the chart. +1. `po/zookeeper-<0|1|2>` are the Pods created by the StatefulSet. Each Pod has a single container running a ZooKeeper server. +1. `svc/zookeeper-headless` is the Headless Service used to control the network domain of the ZooKeeper ensemble. +1. `svc/zookeeper` is a Service that can be used by clients to connect to an available ZooKeeper server. +1. `servicemonitor/zookeeper` is a Prometheus ServiceMonitor which scrapes the jmx-exporter metrics endpoint +1. `servicemonitor/zookeeper-exporter` is a Prometheus ServiceMonitor which scrapes the zookeeper-exporter metrics endpoint + +## Configuration +You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install --name my-release -f values.yaml incubator/zookeeper +``` + +## Default Values + +- You can find all user-configurable settings, their defaults and commentary about them in [values.yaml](values.yaml). + +## Deep Dive + +## Image Details +The image used for this chart is based on Alpine 3.9.0. + +## JVM Details +The Java Virtual Machine used for this chart is the OpenJDK JVM 8u192 JRE (headless). + +## ZooKeeper Details +The chart defaults to ZooKeeper 3.5 (latest released version). + +## Failover +You can test failover by killing the leader.
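A quick way to see which member is currently the leader is to ask each server for its mode (a sketch; the pod names assume the release is called `zookeeper`, as in the examples above):

```console
$ for i in 0 1 2; do echo "zookeeper-$i: $(kubectl exec zookeeper-$i -- zkServer.sh status 2>/dev/null | grep Mode)"; done
```

Once you know the leader, kill that pod and confirm that previously written data survives the re-election.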
Insert a key: +```console +$ kubectl exec zookeeper-0 -- bin/zkCli.sh create /foo bar; +$ kubectl exec zookeeper-2 -- bin/zkCli.sh get /foo; +``` + +Watch existing members: +```console +$ kubectl run --attach bbox --image=busybox --restart=Never -- sh -c 'while true; do for i in 0 1 2; do echo zk-${i} $(echo stats | nc zookeeper-${i}.zookeeper-headless:2181 | grep Mode); sleep 1; done; done'; + +zk-2 Mode: follower +zk-0 Mode: follower +zk-1 Mode: leader +zk-2 Mode: follower +``` + +Delete Pods and wait for the StatefulSet controller to bring them back up: +```console +$ kubectl delete po -l app=zookeeper +$ kubectl get po --watch-only +NAME READY STATUS RESTARTS AGE +zookeeper-0 0/1 Running 0 35s +zookeeper-0 1/1 Running 0 50s +zookeeper-1 0/1 Pending 0 0s +zookeeper-1 0/1 Pending 0 0s +zookeeper-1 0/1 ContainerCreating 0 0s +zookeeper-1 0/1 Running 0 19s +zookeeper-1 1/1 Running 0 40s +zookeeper-2 0/1 Pending 0 0s +zookeeper-2 0/1 Pending 0 0s +zookeeper-2 0/1 ContainerCreating 0 0s +zookeeper-2 0/1 Running 0 19s +zookeeper-2 1/1 Running 0 41s +``` + +Check the previously inserted key: +```console +$ kubectl exec zookeeper-1 -- bin/zkCli.sh get /foo +ionid = 0x354887858e80035, negotiated timeout = 30000 + +WATCHER:: + +WatchedEvent state:SyncConnected type:None path:null +bar +``` + +## Scaling +ZooKeeper cannot be safely scaled in versions prior to 3.5.x. + +## Limitations +* Only supports storage options that have backends for persistent volume claims. diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/NOTES.txt b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/NOTES.txt new file mode 100755 index 0000000..6c5da85 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/NOTES.txt @@ -0,0 +1,7 @@ +Thank you for installing ZooKeeper on your Kubernetes cluster. More information +about ZooKeeper can be found at https://zookeeper.apache.org/doc/current/ + +Your connection string should look like: + {{ template "zookeeper.fullname" . }}-0.{{ template "zookeeper.fullname" . }}-headless:{{ .Values.service.ports.client.port }},{{ template "zookeeper.fullname" . }}-1.{{ template "zookeeper.fullname" . }}-headless:{{ .Values.service.ports.client.port }},... + +You can also use the client service {{ template "zookeeper.fullname" . }}:{{ .Values.service.ports.client.port }} to connect to an available ZooKeeper server. diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/_helpers.tpl b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/_helpers.tpl new file mode 100755 index 0000000..0e15107 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/_helpers.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "zookeeper.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name.
+*/}} +{{- define "zookeeper.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "zookeeper.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +The name of the zookeeper headless service. +*/}} +{{- define "zookeeper.headless" -}} +{{- printf "%s-headless" (include "zookeeper.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +The name of the zookeeper chroots job. +*/}} +{{- define "zookeeper.chroots" -}} +{{- printf "%s-chroots" (include "zookeeper.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/config-jmx-exporter.yaml b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/config-jmx-exporter.yaml new file mode 100755 index 0000000..79905e5 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/config-jmx-exporter.yaml @@ -0,0 +1,19 @@ +{{- if .Values.exporters.jmx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-jmx-exporter + labels: + app: {{ template "zookeeper.name" . }} + chart: {{ template "zookeeper.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + config.yml: |- + hostPort: 127.0.0.1:{{ .Values.env.JMXPORT }} + lowercaseOutputName: {{ .Values.exporters.jmx.config.lowercaseOutputName }} + rules: +{{ .Values.exporters.jmx.config.rules | toYaml | indent 6 }} + ssl: false + startDelaySeconds: {{ .Values.exporters.jmx.config.startDelaySeconds }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/config-script.yaml b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/config-script.yaml new file mode 100755 index 0000000..2b4b44d --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/config-script.yaml @@ -0,0 +1,110 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "zookeeper.fullname" . }} + labels: + app: {{ template "zookeeper.name" . }} + chart: {{ template "zookeeper.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: server +data: + ok: | + #!/bin/sh + zkServer.sh status + + ready: | + #!/bin/sh + echo ruok | nc 127.0.0.1 ${1:-2181} + + run: | + #!/bin/bash + + set -a + ROOT=$(echo /apache-zookeeper-*) + + ZK_USER=${ZK_USER:-"zookeeper"} + ZK_LOG_LEVEL=${ZK_LOG_LEVEL:-"INFO"} + ZK_DATA_DIR=${ZK_DATA_DIR:-"/data"} + ZK_DATA_LOG_DIR=${ZK_DATA_LOG_DIR:-"/data/log"} + ZK_CONF_DIR=${ZK_CONF_DIR:-"/conf"} + ZK_CLIENT_PORT=${ZK_CLIENT_PORT:-2181} + ZK_SERVER_PORT=${ZK_SERVER_PORT:-2888} + ZK_ELECTION_PORT=${ZK_ELECTION_PORT:-3888} + ZK_TICK_TIME=${ZK_TICK_TIME:-2000} + ZK_INIT_LIMIT=${ZK_INIT_LIMIT:-10} + ZK_SYNC_LIMIT=${ZK_SYNC_LIMIT:-5} + ZK_HEAP_SIZE=${ZK_HEAP_SIZE:-2G} + ZK_MAX_CLIENT_CNXNS=${ZK_MAX_CLIENT_CNXNS:-60} + ZK_MIN_SESSION_TIMEOUT=${ZK_MIN_SESSION_TIMEOUT:- $((ZK_TICK_TIME*2))} + ZK_MAX_SESSION_TIMEOUT=${ZK_MAX_SESSION_TIMEOUT:- $((ZK_TICK_TIME*20))} + ZK_SNAP_RETAIN_COUNT=${ZK_SNAP_RETAIN_COUNT:-3} + ZK_PURGE_INTERVAL=${ZK_PURGE_INTERVAL:-0} + ID_FILE="$ZK_DATA_DIR/myid" + ZK_CONFIG_FILE="$ZK_CONF_DIR/zoo.cfg" + LOG4J_PROPERTIES="$ZK_CONF_DIR/log4j.properties" + HOST=$(hostname) + DOMAIN=`hostname -d` + JVMFLAGS="-Xmx$ZK_HEAP_SIZE -Xms$ZK_HEAP_SIZE" + + APPJAR=$(echo $ROOT/*jar) + CLASSPATH="${ROOT}/lib/*:${APPJAR}:${ZK_CONF_DIR}:" + + if [[ $HOST =~ (.*)-([0-9]+)$ ]]; then + NAME=${BASH_REMATCH[1]} + ORD=${BASH_REMATCH[2]} + MY_ID=$((ORD+1)) + else + echo "Failed to extract ordinal from hostname $HOST" + exit 1 + fi + + mkdir -p $ZK_DATA_DIR + mkdir -p $ZK_DATA_LOG_DIR + echo $MY_ID >> $ID_FILE + + echo "clientPort=$ZK_CLIENT_PORT" >> $ZK_CONFIG_FILE + echo "dataDir=$ZK_DATA_DIR" >> $ZK_CONFIG_FILE + echo "dataLogDir=$ZK_DATA_LOG_DIR" >> $ZK_CONFIG_FILE + echo "tickTime=$ZK_TICK_TIME" >> $ZK_CONFIG_FILE + echo "initLimit=$ZK_INIT_LIMIT" >> $ZK_CONFIG_FILE + echo "syncLimit=$ZK_SYNC_LIMIT" >> $ZK_CONFIG_FILE + echo "maxClientCnxns=$ZK_MAX_CLIENT_CNXNS" >> $ZK_CONFIG_FILE + echo "minSessionTimeout=$ZK_MIN_SESSION_TIMEOUT" >> $ZK_CONFIG_FILE + echo "maxSessionTimeout=$ZK_MAX_SESSION_TIMEOUT" >> $ZK_CONFIG_FILE + echo "autopurge.snapRetainCount=$ZK_SNAP_RETAIN_COUNT" >> $ZK_CONFIG_FILE + echo "autopurge.purgeInterval=$ZK_PURGE_INTERVAL" >> $ZK_CONFIG_FILE + echo "4lw.commands.whitelist=*" >> $ZK_CONFIG_FILE + + for (( i=1; i<=$ZK_REPLICAS; i++ )) + do + echo "server.$i=$NAME-$((i-1)).$DOMAIN:$ZK_SERVER_PORT:$ZK_ELECTION_PORT" >> $ZK_CONFIG_FILE + done + + rm -f $LOG4J_PROPERTIES + + echo "zookeeper.root.logger=$ZK_LOG_LEVEL, CONSOLE" >> $LOG4J_PROPERTIES + echo "zookeeper.console.threshold=$ZK_LOG_LEVEL" >> $LOG4J_PROPERTIES + echo "zookeeper.log.threshold=$ZK_LOG_LEVEL" >> $LOG4J_PROPERTIES + echo "zookeeper.log.dir=$ZK_DATA_LOG_DIR" >> $LOG4J_PROPERTIES + echo "zookeeper.log.file=zookeeper.log" >> $LOG4J_PROPERTIES + echo "zookeeper.log.maxfilesize=256MB" >> $LOG4J_PROPERTIES + echo "zookeeper.log.maxbackupindex=10" >> $LOG4J_PROPERTIES + echo "zookeeper.tracelog.dir=$ZK_DATA_LOG_DIR" >> $LOG4J_PROPERTIES + echo "zookeeper.tracelog.file=zookeeper_trace.log" >> $LOG4J_PROPERTIES + echo "log4j.rootLogger=\${zookeeper.root.logger}" >> $LOG4J_PROPERTIES + echo "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender" >> $LOG4J_PROPERTIES + echo "log4j.appender.CONSOLE.Threshold=\${zookeeper.console.threshold}" >> $LOG4J_PROPERTIES + echo "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout" >> $LOG4J_PROPERTIES + echo "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p 
[%t:%C{1}@%L] - %m%n" >> $LOG4J_PROPERTIES + + if [ -n "$JMXDISABLE" ] + then + MAIN=org.apache.zookeeper.server.quorum.QuorumPeerMain + else + MAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=$JMXPORT -Dcom.sun.management.jmxremote.authenticate=$JMXAUTH -Dcom.sun.management.jmxremote.ssl=$JMXSSL -Dzookeeper.jmx.log4j.disable=$JMXLOG4J org.apache.zookeeper.server.quorum.QuorumPeerMain" + fi + + set -x + exec java -cp "$CLASSPATH" $JVMFLAGS $MAIN $ZK_CONFIG_FILE diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/job-chroots.yaml b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/job-chroots.yaml new file mode 100755 index 0000000..3aa08c9 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/job-chroots.yaml @@ -0,0 +1,65 @@ +{{- if .Values.jobs.chroots.enabled }} +{{- $root := . }} +{{- $job := .Values.jobs.chroots }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "zookeeper.chroots" . }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded + labels: + app: {{ template "zookeeper.name" . }} + chart: {{ template "zookeeper.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: jobs + job: chroots +spec: + activeDeadlineSeconds: {{ $job.activeDeadlineSeconds }} + backoffLimit: {{ $job.backoffLimit }} + completions: {{ $job.completions }} + parallelism: {{ $job.parallelism }} + template: + metadata: + labels: + app: {{ template "zookeeper.name" . }} + release: {{ .Release.Name }} + component: jobs + job: chroots + spec: + restartPolicy: {{ $job.restartPolicy }} +{{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" +{{- end }} + containers: + - name: main + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/bash + - -o + - pipefail + - -euc + {{- $port := .Values.service.ports.client.port }} + - > + sleep 15; + export SERVER={{ template "zookeeper.fullname" $root }}:{{ $port }}; + {{- range $job.config.create }} + echo '==> {{ . }}'; + echo '====> Create chroot if does not exist.'; + zkCli.sh -server {{ template "zookeeper.fullname" $root }}:{{ $port }} get {{ . }} 2>&1 >/dev/null | grep 'cZxid' + || zkCli.sh -server {{ template "zookeeper.fullname" $root }}:{{ $port }} create {{ . }} ""; + echo '====> Confirm chroot exists.'; + zkCli.sh -server {{ template "zookeeper.fullname" $root }}:{{ $port }} get {{ . }} 2>&1 >/dev/null | grep 'cZxid'; + echo '====> Chroot exists.'; + {{- end }} + env: + {{- range $key, $value := $job.env }} + - name: {{ $key | upper | replace "." "_" }} + value: {{ $value | quote }} + {{- end }} + resources: +{{ toYaml $job.resources | indent 12 }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/poddisruptionbudget.yaml b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/poddisruptionbudget.yaml new file mode 100755 index 0000000..d26aad3 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/poddisruptionbudget.yaml @@ -0,0 +1,17 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ template "zookeeper.fullname" . }} + labels: + app: {{ template "zookeeper.name" . }} + chart: {{ template "zookeeper.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: server +spec: + selector: + matchLabels: + app: {{ template "zookeeper.name" . }} + release: {{ .Release.Name }} + component: server +{{ toYaml .Values.podDisruptionBudget | indent 2 }} diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/service-headless.yaml b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/service-headless.yaml new file mode 100755 index 0000000..3193a1b --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/service-headless.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "zookeeper.headless" . }} + labels: + app: {{ template "zookeeper.name" . }} + chart: {{ template "zookeeper.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.headless.annotations }} + annotations: +{{ .Values.headless.annotations | toYaml | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + clusterIP: None +{{- if .Values.headless.publishNotReadyAddresses }} + publishNotReadyAddresses: true +{{- end }} + ports: +{{- range $key, $port := .Values.ports }} + - name: {{ $key }} + port: {{ $port.containerPort }} + targetPort: {{ $key }} + protocol: {{ $port.protocol }} +{{- end }} + selector: + app: {{ template "zookeeper.name" . }} + release: {{ .Release.Name }} diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/service.yaml b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/service.yaml new file mode 100755 index 0000000..09fc1dc --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/service.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "zookeeper.fullname" . }} + labels: + app: {{ template "zookeeper.name" . }} + chart: {{ template "zookeeper.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.service.annotations }} + annotations: +{{- with .Values.service.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + {{- range $key, $value := .Values.service.ports }} + - name: {{ $key }} +{{ toYaml $value | indent 6 }} + {{- end }} +{{- if .Values.exporters.jmx.enabled }} + {{- range $key, $port := .Values.exporters.jmx.ports }} + - name: {{ $key }} + port: {{ $port.containerPort }} + targetPort: {{ $key }} + protocol: {{ $port.protocol }} + {{- end }} +{{- end}} +{{- if .Values.exporters.zookeeper.enabled }} + {{- range $key, $port := .Values.exporters.zookeeper.ports }} + - name: {{ $key }} + port: {{ $port.containerPort }} + targetPort: {{ $key }} + protocol: {{ $port.protocol }} + {{- end }} +{{- end}} + selector: + app: {{ template "zookeeper.name" . }} + release: {{ .Release.Name }} diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/servicemonitors.yaml b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/servicemonitors.yaml new file mode 100755 index 0000000..0a230a2 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/servicemonitors.yaml @@ -0,0 +1,56 @@ +{{- if and .Values.exporters.jmx.enabled .Values.prometheus.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "zookeeper.fullname" . 
}} + {{- if .Values.prometheus.serviceMonitor.namespace }} + namespace: {{ .Values.prometheus.serviceMonitor.namespace }} + {{- end }} + labels: +{{ toYaml .Values.prometheus.serviceMonitor.selector | indent 4 }} +spec: + endpoints: + {{- range $key, $port := .Values.exporters.jmx.ports }} + - port: {{ $key }} + path: {{ $.Values.exporters.jmx.path }} + interval: {{ $.Values.exporters.jmx.serviceMonitor.interval }} + scrapeTimeout: {{ $.Values.exporters.jmx.serviceMonitor.scrapeTimeout }} + scheme: {{ $.Values.exporters.jmx.serviceMonitor.scheme }} + {{- end }} + selector: + matchLabels: + app: {{ include "zookeeper.name" . }} + release: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} +--- + +{{- if and .Values.exporters.zookeeper.enabled .Values.prometheus.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "zookeeper.fullname" . }}-exporter + {{- if .Values.prometheus.serviceMonitor.namespace }} + namespace: {{ .Values.prometheus.serviceMonitor.namespace }} + {{- end }} + labels: +{{ toYaml .Values.prometheus.serviceMonitor.selector | indent 4 }} +spec: + endpoints: + {{- range $key, $port := .Values.exporters.zookeeper.ports }} + - port: {{ $key }} + path: {{ $.Values.exporters.zookeeper.path }} + interval: {{ $.Values.exporters.zookeeper.serviceMonitor.interval }} + scrapeTimeout: {{ $.Values.exporters.zookeeper.serviceMonitor.scrapeTimeout }} + scheme: {{ $.Values.exporters.zookeeper.serviceMonitor.scheme }} + {{- end }} + selector: + matchLabels: + app: {{ include "zookeeper.name" . }} + release: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/statefulset.yaml b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/statefulset.yaml new file mode 100755 index 0000000..6d508a6 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/templates/statefulset.yaml @@ -0,0 +1,226 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "zookeeper.fullname" . }} + labels: + app: {{ template "zookeeper.name" . }} + chart: {{ template "zookeeper.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: server +spec: + serviceName: {{ template "zookeeper.headless" . }} + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ template "zookeeper.name" . }} + release: {{ .Release.Name }} + component: server + updateStrategy: +{{ toYaml .Values.updateStrategy | indent 4 }} + template: + metadata: + labels: + app: {{ template "zookeeper.name" . 
}} + release: {{ .Release.Name }} + component: server + {{- if .Values.podLabels }} + ## Custom pod labels + {{- range $key, $value := .Values.podLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} +{{- if .Values.podAnnotations }} + annotations: + ## Custom pod annotations + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} + spec: + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} +{{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" +{{- end }} + securityContext: +{{ toYaml .Values.securityContext | indent 8 }} +{{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" +{{- end }} + containers: + + - name: zookeeper + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- with .Values.command }} + command: {{ range . }} + - {{ . | quote }} + {{- end }} + {{- end }} + ports: +{{- range $key, $port := .Values.ports }} + - name: {{ $key }} +{{ toYaml $port | indent 14 }} +{{- end }} + livenessProbe: + exec: + command: + - sh + - /config-scripts/ok + initialDelaySeconds: 20 + periodSeconds: 30 + timeoutSeconds: 5 + failureThreshold: 2 + successThreshold: 1 + readinessProbe: + exec: + command: + - sh + - /config-scripts/ready + initialDelaySeconds: 20 + periodSeconds: 30 + timeoutSeconds: 5 + failureThreshold: 2 + successThreshold: 1 + env: + - name: ZK_REPLICAS + value: {{ .Values.replicaCount | quote }} + {{- range $key, $value := .Values.env }} + - name: {{ $key | upper | replace "." "_" }} + value: {{ $value | quote }} + {{- end }} + {{- range $secret := .Values.secrets }} + {{- range $key := $secret.keys }} + - name: {{ (print $secret.name "_" $key) | upper }} + valueFrom: + secretKeyRef: + name: {{ $secret.name }} + key: {{ $key }} + {{- end }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 12 }} + volumeMounts: + - name: data + mountPath: /data + {{- range $secret := .Values.secrets }} + {{- if $secret.mountPath }} + {{- range $key := $secret.keys }} + - name: {{ $.Release.Name }}-{{ $secret.name }} + mountPath: {{ $secret.mountPath }}/{{ $key }} + subPath: {{ $key }} + readOnly: true + {{- end }} + {{- end }} + {{- end }} + - name: config + mountPath: /config-scripts + + +{{- if .Values.exporters.jmx.enabled }} + - name: jmx-exporter + image: "{{ .Values.exporters.jmx.image.repository }}:{{ .Values.exporters.jmx.image.tag }}" + imagePullPolicy: {{ .Values.exporters.jmx.image.pullPolicy }} + ports: + {{- range $key, $port := .Values.exporters.jmx.ports }} + - name: {{ $key }} +{{ toYaml $port | indent 14 }} + {{- end }} + livenessProbe: +{{ toYaml .Values.exporters.jmx.livenessProbe | indent 12 }} + readinessProbe: +{{ toYaml .Values.exporters.jmx.readinessProbe | indent 12 }} + env: + - name: SERVICE_PORT + value: {{ .Values.exporters.jmx.ports.jmxxp.containerPort | quote }} + {{- with .Values.exporters.jmx.env }} + {{- range $key, $value := . }} + - name: {{ $key | upper | replace "." 
"_" }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + resources: +{{ toYaml .Values.exporters.jmx.resources | indent 12 }} + volumeMounts: + - name: config-jmx-exporter + mountPath: /opt/jmx_exporter/config.yml + subPath: config.yml +{{- end }} + +{{- if .Values.exporters.zookeeper.enabled }} + - name: zookeeper-exporter + image: "{{ .Values.exporters.zookeeper.image.repository }}:{{ .Values.exporters.zookeeper.image.tag }}" + imagePullPolicy: {{ .Values.exporters.zookeeper.image.pullPolicy }} + args: + - -bind-addr=:{{ .Values.exporters.zookeeper.ports.zookeeperxp.containerPort }} + - -metrics-path={{ .Values.exporters.zookeeper.path }} + - -zookeeper=localhost:{{ .Values.ports.client.containerPort }} + - -log-level={{ .Values.exporters.zookeeper.config.logLevel }} + - -reset-on-scrape={{ .Values.exporters.zookeeper.config.resetOnScrape }} + ports: + {{- range $key, $port := .Values.exporters.zookeeper.ports }} + - name: {{ $key }} +{{ toYaml $port | indent 14 }} + {{- end }} + livenessProbe: +{{ toYaml .Values.exporters.zookeeper.livenessProbe | indent 12 }} + readinessProbe: +{{ toYaml .Values.exporters.zookeeper.readinessProbe | indent 12 }} + env: + {{- range $key, $value := .Values.exporters.zookeeper.env }} + - name: {{ $key | upper | replace "." "_" }} + value: {{ $value | quote }} + {{- end }} + resources: +{{ toYaml .Values.exporters.zookeeper.resources | indent 12 }} +{{- end }} + + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "zookeeper.fullname" . }} + defaultMode: 0555 + {{- range .Values.secrets }} + - name: {{ $.Release.Name }}-{{ .name }} + secret: + secretName: {{ .name }} + {{- end }} + {{- if .Values.exporters.jmx.enabled }} + - name: config-jmx-exporter + configMap: + name: {{ .Release.Name }}-jmx-exporter + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/ansible/roles/helm_install/files/druid/charts/zookeeper/values.yaml b/ansible/roles/helm_install/files/druid/charts/zookeeper/values.yaml new file mode 100755 index 0000000..19830ea --- /dev/null +++ b/ansible/roles/helm_install/files/druid/charts/zookeeper/values.yaml @@ -0,0 +1,300 @@ +## As weighted quorums are not supported, it is imperative that an odd number of replicas +## be chosen. Moreover, the number of replicas should be either 1, 3, 5, or 7. +## +## ref: https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper#stateful-set +replicaCount: 3 # Desired quantity of ZooKeeper pods. This should always be (1,3,5, or 7) + +podDisruptionBudget: + maxUnavailable: 1 # Limits how many Zokeeper pods may be unavailable due to voluntary disruptions. + +terminationGracePeriodSeconds: 1800 # Duration in seconds a Zokeeper pod needs to terminate gracefully. 
+ +updateStrategy: + type: RollingUpdate + +## refs: +## - https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper +## - https://github.com/kubernetes/contrib/blob/master/statefulsets/zookeeper/Makefile#L1 +image: + repository: zookeeper # Container image repository for zookeeper container. + tag: 3.5.5 # Container image tag for zookeeper container. + pullPolicy: IfNotPresent # Image pull criteria for zookeeper container. + +service: + type: ClusterIP # Exposes zookeeper on a cluster-internal IP. + annotations: {} # Arbitrary non-identifying metadata for zookeeper service. + ## AWS example for use with LoadBalancer service type. + # external-dns.alpha.kubernetes.io/hostname: zookeeper.cluster.local + # service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + # service.beta.kubernetes.io/aws-load-balancer-internal: "true" + ports: + client: + port: 2181 # Service port number for client port. + targetPort: client # Service target port for client port. + protocol: TCP # Service port protocol for client port. + +## Headless service. +## +headless: + annotations: {} + # publishNotReadyAddresses, default false for backward compatibility + # set to true to register DNS entries for unready pods, which helps in rare + # occasions when cluster is unable to be created, DNS caching is enforced + # or pods are in persistent crash loop + publishNotReadyAddresses: false + +ports: + client: + containerPort: 2181 # Port number for zookeeper container client port. + protocol: TCP # Protocol for zookeeper container client port. + election: + containerPort: 3888 # Port number for zookeeper container election port. + protocol: TCP # Protocol for zookeeper container election port. + server: + containerPort: 2888 # Port number for zookeeper container server port. + protocol: TCP # Protocol for zookeeper container server port. + +resources: {} # Optionally specify how much CPU and memory (RAM) each zookeeper container needs. + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +priorityClassName: "" + +nodeSelector: {} # Node label-values required to run zookeeper pods. + +tolerations: [] # Node taint overrides for zookeeper pods. + +affinity: {} # Criteria by which pod label-values influence scheduling for zookeeper pods. + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - topologyKey: "kubernetes.io/hostname" + # labelSelector: + # matchLabels: + # release: zookeeper + +podAnnotations: {} # Arbitrary non-identifying metadata for zookeeper pods. + # prometheus.io/scrape: "true" + # prometheus.io/path: "/metrics" + # prometheus.io/port: "9141" + +podLabels: {} # Key/value pairs that are attached to zookeeper pods. + # team: "developers" + # service: "zookeeper" + +securityContext: + fsGroup: 1000 + runAsUser: 1000 + +## Useful, if you want to use an alternate image. +command: + - /bin/bash + - -xec + - /config-scripts/run + +## Useful if using any custom authorizer. +## Pass any secrets to the kafka pods. Each secret will be passed as an +## environment variable by default. 
The secret can also be mounted to a +## specific path (in addition to environment variable) if required. Environment +## variable names are generated as: `<secretName>_<keyName>` (All upper case) +# secrets: +# - name: myKafkaSecret +# keys: +# - username +# - password +# # mountPath: /opt/kafka/secret +# - name: myZkSecret +# keys: +# - user +# - pass +# mountPath: /opt/zookeeper/secret + +persistence: + enabled: true + ## zookeeper data Persistent Volume Storage Class + ## If defined, storageClassName: <storageClass> + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessMode: ReadWriteOnce + size: 5Gi + +## Exporters query apps for metrics and make those metrics available for +## Prometheus to scrape. +exporters: + + jmx: + enabled: false + image: + repository: sscaling/jmx-prometheus-exporter + tag: 0.3.0 + pullPolicy: IfNotPresent + config: + lowercaseOutputName: false + ## ref: https://github.com/prometheus/jmx_exporter/blob/master/example_configs/zookeeper.yaml + rules: + - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+)><>(\\w+)" + name: "zookeeper_$2" + - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)" + name: "zookeeper_$3" + labels: + replicaId: "$2" + - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(\\w+)" + name: "zookeeper_$4" + labels: + replicaId: "$2" + memberType: "$3" + - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+), name3=(\\w+)><>(\\w+)" + name: "zookeeper_$4_$5" + labels: + replicaId: "$2" + memberType: "$3" + startDelaySeconds: 30 + env: {} + resources: {} + path: /metrics + ports: + jmxxp: + containerPort: 9404 + protocol: TCP + livenessProbe: + httpGet: + path: /metrics + port: jmxxp + initialDelaySeconds: 30 + periodSeconds: 15 + timeoutSeconds: 60 + failureThreshold: 8 + successThreshold: 1 + readinessProbe: + httpGet: + path: /metrics + port: jmxxp + initialDelaySeconds: 30 + periodSeconds: 15 + timeoutSeconds: 60 + failureThreshold: 8 + successThreshold: 1 + serviceMonitor: + interval: 30s + scrapeTimeout: 30s + scheme: http + + zookeeper: + ## refs: + ## - https://github.com/carlpett/zookeeper_exporter + ## - https://hub.docker.com/r/josdotso/zookeeper-exporter/ + ## - https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/#zookeeper-metrics + enabled: false + image: + repository: josdotso/zookeeper-exporter + tag: v1.1.2 + pullPolicy: IfNotPresent + config: + logLevel: info + resetOnScrape: "true" + env: {} + resources: {} + path: /metrics + ports: + zookeeperxp: + containerPort: 9141 + protocol: TCP + livenessProbe: + httpGet: + path: /metrics + port: zookeeperxp + initialDelaySeconds: 30 + periodSeconds: 15 + timeoutSeconds: 60 + failureThreshold: 8 + successThreshold: 1 + readinessProbe: + httpGet: + path: /metrics + port: zookeeperxp + initialDelaySeconds: 30 + periodSeconds: 15 + timeoutSeconds: 60 + failureThreshold: 8 + successThreshold: 1 + serviceMonitor: + interval: 30s + scrapeTimeout: 30s + scheme: http + +## ServiceMonitor configuration in case you are using Prometheus Operator +prometheus: + serviceMonitor: + ## If true a ServiceMonitor for each enabled exporter will be installed + enabled: false + ## The namespace where the ServiceMonitor(s) will be installed + # namespace: monitoring + ## The selector the Prometheus instance is searching for + ## [Default Prometheus Operator selector]
(https://github.com/helm/charts/blob/f5a751f174263971fafd21eee4e35416d6612a3d/stable/prometheus-operator/templates/prometheus/prometheus.yaml#L74) + selector: {} + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## ref: https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper +env: + + ## Options related to JMX exporter. + ## ref: https://github.com/apache/zookeeper/blob/master/bin/zkServer.sh#L36 + JMXAUTH: "false" + JMXDISABLE: "false" + JMXPORT: 1099 + JMXSSL: "false" + + ## The port on which the server will accept client requests. + ZOO_PORT: 2181 + + ## The number of Ticks that an ensemble member is allowed to perform leader + ## election. + ZOO_INIT_LIMIT: 5 + + ZOO_TICK_TIME: 2000 + + ## The maximum number of concurrent client connections that + ## a server in the ensemble will accept. + ZOO_MAX_CLIENT_CNXNS: 60 + + ## The number of Ticks by which a follower may lag behind the ensemble's leader. + ZK_SYNC_LIMIT: 10 + + ## The number of wall clock ms that corresponds to a Tick for the ensemble's + ## internal time. + ZK_TICK_TIME: 2000 + + ZOO_AUTOPURGE_PURGEINTERVAL: 0 + ZOO_AUTOPURGE_SNAPRETAINCOUNT: 3 + ZOO_STANDALONE_ENABLED: false + +jobs: + ## ref: http://zookeeper.apache.org/doc/r3.4.10/zookeeperProgrammers.html#ch_zkSessions + chroots: + enabled: false + activeDeadlineSeconds: 300 + backoffLimit: 5 + completions: 1 + config: + create: [] + # - /kafka + # - /ureplicator + env: [] + parallelism: 1 + resources: {} + restartPolicy: Never diff --git a/ansible/roles/helm_install/files/druid/install.txt b/ansible/roles/helm_install/files/druid/install.txt new file mode 100644 index 0000000..5f3f942 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/install.txt @@ -0,0 +1,3 @@ +helm uninstall druid -n dsk-middle +helm install druid . -n dsk-middle -f override-values.yaml --create-namespace +helm upgrade druid . -n dsk-middle -f override-values.yaml diff --git a/ansible/roles/helm_install/files/druid/override-values.yaml b/ansible/roles/helm_install/files/druid/override-values.yaml new file mode 100644 index 0000000..4b14021 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/override-values.yaml @@ -0,0 +1,225 @@ +configVars: + druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-extraction-namespace", "druid-kafka-indexing-service", "prometheus-emitter","druid-s3-extensions"]' + druid_metadata_storage_connector_connectURI: jdbc:postgresql://druid-postgresql:5432/druid + # integration druid exporter configuration + druid_emitter: prometheus + druid_emitter_prometheus_strategy: exporter + druid_emitter_prometheus_port: "9000" + druid_monitoring_monitors: '["org.apache.druid.java.util.metrics.JvmMonitor", "org.apache.druid.java.util.metrics.JvmThreadsMonitor"]' + + # Folder creation looks wrong; these options need to be re-checked later
+ druid_storage_type: s3 + druid_storage_bucket: druid.dev.datasaker.io + druid_storage_baseKey: druid-data/segments + druid_s3_accessKey: + druid_s3_secretKey: + AWS_REGION: "ap-northeast-2" + druid_s3_forceGlobalBucketAccessEnabled: "false" + druid_storage_disableAcl: "true" + druid_indexer_logs_type: s3 + druid_indexer_logs_s3Bucket: druid.dev.datasaker.io + druid_indexer_logs_s3Prefix: druid-data/logs + druid_indexer_logs_disableAcl: "true" + druid_s3_endpoint_signingRegion: "ap-northeast-2" + druid_s3_endpoint_url: "https://s3.ap-northeast-2.amazonaws.com/druid.dev.datasaker.io/druid-data" + druid_s3_protocol: "https" + druid_s3_enablePathStyleAccess: "true" + +broker: + config: + DRUID_XMX: 8g + DRUID_XMS: 8g + DRUID_MAXDIRECTMEMORYSIZE: 12g + JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof" + druid_server_http_maxSubqueryRows: "1000000" + druid_server_http_numThreads: 60 + druid_broker_http_numConnections: 50 + druid_broker_http_maxQueuedBytes: '10MiB' + druid_processing_numMergeBuffers: 6 + druid_processing_buffer_sizeBytes: '500MiB' + + tolerations: + - key: "dev/data-druid" + operator: "Exists" + + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/druid-size + operator: In + values: + - middle + +coordinator: + config: + DRUID_XMX: 8g + DRUID_XMS: 8g + JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof" + + tolerations: + - key: "dev/data-druid" + operator: "Exists" + + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/druid-size + operator: In + values: + - middle + +overlord: + javaOpts: "-Xms4G -Xmx4G" + + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/druid-size + operator: In + values: + - middle + +historical: + config: + DRUID_XMX: 8g + DRUID_XMS: 8g + DRUID_MAXDIRECTMEMORYSIZE: 12g + JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof" + druid_server_http_numThreads: 60 + druid_processing_numThreads: 16 + druid_processing_numMergeBuffers: 4 + druid_processing_buffer_sizeBytes: '500MiB' + druid_segmentCache_locations: '[{"path":"/opt/druid/var/druid/segment-cache","maxSize":"300g"}]' + druid_server_maxSize: '800g' + druid_historical_cache_useCache: true + druid_historical_cache_populateCache: true + druid_cache_type: 'caffeine' + druid_cache_sizeInBytes: '256MiB' + + persistence: + enabled: false + + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/druid-size + operator: In + values: + - large + +middleManager: + config: + DRUID_XMX: 128m + DRUID_XMS: 128m + JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof" + druid_indexer_runner_javaOptsArray: '["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=4g", "-XX:+UseG1GC", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-XX:+ExitOnOutOfMemoryError", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.
LogManager"]' + druid_indexer_task_baseTaskDir: var/druid/task + druid_worker_capacity: 20 + druid_indexer_fork_property_druid_processing_buffer_sizeBytes: '500MiB' + druid_indexer_fork_property_druid_processing_numThreads: 4 + druid_indexer_fork_property_druid_processing_numMergeBuffers: 2 + + persistence: + enabled: false + + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/druid-size + operator: In + values: + - large + +router: + config: + DRUID_XMX: 1g + DRUID_XMS: 1g + DRUID_MAXDIRECTMEMORYSIZE: 3g + JAVA_OPTS: "-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/logs/druid/historical.hprof" + + serviceType: NodePort + # templates/router/service.yaml에서 nodePort 속성 추가 + nodePort: 30888 + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/druid-size + operator: In + values: + - small + +# ------------------------------------------------------------------------------ +# Zookeeper: +# ------------------------------------------------------------------------------ +zookeeper: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + + +# ------------------------------------------------------------------------------ +# postgres: +# ------------------------------------------------------------------------------ +postgresql: + master: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + + slave: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + diff --git a/ansible/roles/helm_install/files/druid/override-values.yaml_221206 b/ansible/roles/helm_install/files/druid/override-values.yaml_221206 new file mode 100644 index 0000000..8a0272f --- /dev/null +++ b/ansible/roles/helm_install/files/druid/override-values.yaml_221206 @@ -0,0 +1,175 @@ +configVars: + druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-extraction-namespace", "druid-kafka-indexing-service"]' + druid_metadata_storage_connector_connectURI: jdbc:postgresql://druid-postgresql:5432/druid + # integration druid exporter configuration + druid_emitter: http + druid_emitter_http_recipientBaseUrl: http://prometheus-druid-exporter:8080/druid + +broker: + config: + DRUID_XMX: 4g + DRUID_XMS: 4g + DRUID_MAXDIRECTMEMORYSIZE: 1g + druid_server_http_maxSubqueryRows: "1000000" + + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + +coordinator: + config: + DRUID_XMX: 4g + DRUID_XMS: 4g + + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + 
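# Note: every component in these override files repeats the same scheduling
# pattern: tolerate the dedicated "dev/data-druid" taint, then require a node
# label match so pods can only land on the Druid node group (and those nodes
# only accept Druid pods). A minimal sketch of the pattern, using the labels
# exactly as they appear in these files:
#
#   tolerations:
#     - key: "dev/data-druid"
#       operator: "Exists"
#   affinity:
#     nodeAffinity:
#       requiredDuringSchedulingIgnoredDuringExecution:
#         nodeSelectorTerms:
#           - matchExpressions:
#               - key: datasaker/druid-size   # datasaker/group in the archived files
#                 operator: In
#                 values:
#                   - middle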
nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + +overlord: + javaOpts: "-Xms4G -Xmx4G" + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + +historical: + config: + DRUID_XMX: 10g + DRUID_XMS: 10g + druid_processing_numThreads: 3 + druid_segmentCache_locations: '[{"path":"/opt/druid/var/druid/segment-cache","maxSize":"500g"}]' + druid_server_maxSize: '500g' + persistence: + size: "500Gi" + + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + +middleManager: + config: + DRUID_XMX: 1g + DRUID_XMS: 1g + druid_indexer_runner_javaOptsArray: '["-server", "-Xms4g", "-Xmx4g", "-XX:MaxDirectMemorySize=6g", "-XX:+UseStringDeduplication", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-XX:+ExitOnOutOfMemoryError", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]' + druid_worker_capacity: 8 + druid_indexer_fork_property_druid_processing_buffer_sizeBytes: '330MiB' + druid_indexer_fork_property_druid_processing_numThreads: 1 + druid_indexer_fork_property_druid_processing_numMergeBuffers: 2 + persistence: + size: "500Gi" + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + + +router: + serviceType: NodePort + # nodePort attribute added in templates/router/service.yaml + nodePort: 30888 + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + + +# ------------------------------------------------------------------------------ +# Zookeeper: +# ------------------------------------------------------------------------------ +zookeeper: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + + +# ------------------------------------------------------------------------------ +# postgres: +# ------------------------------------------------------------------------------ +postgresql: + master: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + + slave: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid diff --git a/ansible/roles/helm_install/files/druid/override-values.yaml_221207 b/ansible/roles/helm_install/files/druid/override-values.yaml_221207 new file mode 100644 index 0000000..20f4f45 --- /dev/null +++ 
b/ansible/roles/helm_install/files/druid/override-values.yaml_221207 @@ -0,0 +1,194 @@ +configVars: + druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-extraction-namespace", "druid-kafka-indexing-service", "druid-s3-extensions"]' + druid_metadata_storage_connector_connectURI: jdbc:postgresql://druid-postgresql:5432/druid + # druid exporter integration configuration + druid_emitter: http + druid_emitter_http_recipientBaseUrl: http://prometheus-druid-exporter:8080/druid + + # Folder creation looks odd; these options need to be re-checked later. + druid_storage_type: s3 + druid_storage_bucket: druid.dev.datasaker.io + druid_storage_baseKey: druid-data/segments + druid_s3_accessKey: + druid_s3_secretKey: + AWS_REGION: "ap-northeast-2" + druid_s3_forceGlobalBucketAccessEnabled: "false" + druid_storage_disableAcl: "true" + druid_indexer_logs_type: s3 + druid_indexer_logs_s3Bucket: druid.dev.datasaker.io + druid_indexer_logs_s3Prefix: druid-data/logs + druid_indexer_logs_disableAcl: "true" + druid_s3_endpoint_signingRegion: "ap-northeast-2" + druid_s3_endpoint_url: "https://s3.ap-northeast-2.amazonaws.com/druid.dev.datasaker.io/druid-data" + druid_s3_protocol: "https" + druid_s3_enablePathStyleAccess: "true" + +broker: + config: + DRUID_XMX: 4g + DRUID_XMS: 4g + DRUID_MAXDIRECTMEMORYSIZE: 1g + druid_server_http_maxSubqueryRows: "1000000" + + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kops.k8s.io/instancegroup + operator: In + values: + - dev-data-druid-b + +coordinator: + config: + DRUID_XMX: 4g + DRUID_XMS: 4g + + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kops.k8s.io/instancegroup + operator: In + values: + - dev-data-druid-b + +overlord: + javaOpts: "-Xms4G -Xmx4G" + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kops.k8s.io/instancegroup + operator: In + values: + - dev-data-druid-b + +historical: + config: + DRUID_XMX: 10g + DRUID_XMS: 10g + druid_processing_numThreads: 3 + druid_segmentCache_locations: '[{"path":"/opt/druid/var/druid/segment-cache","maxSize":"500g"}]' + druid_server_maxSize: '500g' + persistence: + enabled: false + + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kops.k8s.io/instancegroup + operator: In + values: + - dev-data-druid-c + +middleManager: + config: + DRUID_XMX: 1g + DRUID_XMS: 1g + druid_indexer_runner_javaOptsArray: '["-server", "-Xms4g", "-Xmx4g", "-XX:MaxDirectMemorySize=6g", "-XX:+UseStringDeduplication", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-XX:+ExitOnOutOfMemoryError", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]' + druid_worker_capacity: 8 + druid_indexer_fork_property_druid_processing_buffer_sizeBytes: '330MiB' + druid_indexer_fork_property_druid_processing_numThreads: 1 + druid_indexer_fork_property_druid_processing_numMergeBuffers: 2 + persistence: + enabled: false + + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + 
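# Note: unlike the other snapshots, this 221207 file schedules by kops instance
# group rather than by the datasaker/group label: broker, coordinator, overlord
# and router are pinned to dev-data-druid-b, while the data-heavy historical and
# middleManager go to dev-data-druid-c, e.g.:
#
#   - matchExpressions:
#       - key: kops.k8s.io/instancegroup
#         operator: In
#         values:
#           - dev-data-druid-c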
requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kops.k8s.io/instancegroup + operator: In + values: + - dev-data-druid-c + +router: + serviceType: NodePort + # nodePort attribute added in templates/router/service.yaml + nodePort: 30888 + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kops.k8s.io/instancegroup + operator: In + values: + - dev-data-druid-b + +# ------------------------------------------------------------------------------ +# Zookeeper: +# ------------------------------------------------------------------------------ +zookeeper: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + + +# ------------------------------------------------------------------------------ +# postgres: +# ------------------------------------------------------------------------------ +postgresql: + master: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + + slave: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + diff --git a/ansible/roles/helm_install/files/druid/override-values.yaml_old b/ansible/roles/helm_install/files/druid/override-values.yaml_old new file mode 100644 index 0000000..299dea3 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/override-values.yaml_old @@ -0,0 +1,171 @@ +configVars: + druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-extraction-namespace", "druid-kafka-indexing-service"]' + druid_metadata_storage_connector_connectURI: jdbc:postgresql://druid-postgresql:5432/druid + +broker: + config: + DRUID_XMX: 4g + DRUID_XMS: 4g + DRUID_MAXDIRECTMEMORYSIZE: 1g + druid_server_http_maxSubqueryRows: "1000000" + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + +coordinator: + config: + DRUID_XMX: 4g + DRUID_XMS: 4g + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + +overlord: + javaOpts: "-Xms4G -Xmx4G" + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + +historical: + config: + DRUID_XMX: 2g + DRUID_XMS: 2g + druid_processing_numThreads: 3 + druid_segmentCache_locations: '[{"path":"/opt/druid/var/druid/segment-cache","maxSize":"500g"}]' + druid_server_maxSize: '500g' + persistence: + size: "500Gi" + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + +middleManager: + config: + DRUID_XMX: 9g + DRUID_XMS: 9g + druid_indexer_runner_javaOptsArray: '["-server", "-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=3g", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-XX:+ExitOnOutOfMemoryError", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]' + druid_worker_capacity: 8 + druid_indexer_fork_property_druid_processing_buffer_sizeBytes: '330MiB' + druid_indexer_fork_property_druid_processing_numThreads: 1 + druid_indexer_fork_property_druid_processing_numMergeBuffers: 2 + persistence: + size: "500Gi" + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + +router: + serviceType: NodePort + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + +# ------------------------------------------------------------------------------ +# Zookeeper: +# ------------------------------------------------------------------------------ +# zookeeper: +zookeeper: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + +# ------------------------------------------------------------------------------ +# postgres: +# ------------------------------------------------------------------------------ +# postgresql: +postgresql: + master: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + slave: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + + + + + +# Secrets diff --git a/ansible/roles/helm_install/files/druid/templates/NOTES.txt b/ansible/roles/helm_install/files/druid/templates/NOTES.txt new file mode 100644 index 0000000..be1c96f --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/NOTES.txt @@ -0,0 +1,38 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +1. Get the router URL by running these commands: +{{- if .Values.router.ingress.enabled }} +{{- range .Values.router.ingress.hosts }} + http{{ if $.Values.router.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.router.ingress.path }} +{{- end }} +{{- else if contains "NodePort" .Values.router.serviceType }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "druid.router.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.router.serviceType }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch its status by running 'kubectl get svc -w {{ include "druid.router.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "druid.router.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.router.port }} +{{- else if contains "ClusterIP" .Values.router.serviceType }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "druid.name" . }},release={{ .Release.Name }},component={{ .Values.router.name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:{{ .Values.router.port }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/_helpers.tpl b/ansible/roles/helm_install/files/druid/templates/_helpers.tpl new file mode 100644 index 0000000..bb47149 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/_helpers.tpl @@ -0,0 +1,100 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "druid.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If the release name contains the chart name, it will be used as the full name. 
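For example, install.txt installs this chart with the release name "druid", which already contains the chart name, so druid.fullname resolves to just the release name. Assuming the default component names in values.yaml (e.g. broker.name: broker, which is not shown in this hunk), the helpers below would render roughly:

    druid.fullname: druid
    druid.broker.fullname: druid-broker
    druid.historical.fullname: druid-historical
    druid.router.fullname: druid-router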
+*/}} +{{- define "druid.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "druid.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified historical name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "druid.historical.fullname" -}} +{{ template "druid.fullname" . }}-{{ .Values.historical.name }} +{{- end -}} + +{{/* +Create a default fully qualified middleManager name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "druid.middleManager.fullname" -}} +{{ template "druid.fullname" . }}-{{ .Values.middleManager.name }} +{{- end -}} + + +{{/* +Create a default fully qualified broker name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "druid.broker.fullname" -}} +{{ template "druid.fullname" . }}-{{ .Values.broker.name }} +{{- end -}} + +{{/* +Create a default fully qualified overlord name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "druid.overlord.fullname" -}} +{{ template "druid.fullname" . }}-{{ .Values.overlord.name }} +{{- end -}} + +{{/* +Create a default fully qualified coordinator name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "druid.coordinator.fullname" -}} +{{ template "druid.fullname" . }}-{{ .Values.coordinator.name }} +{{- end -}} + +{{/* +Create a default fully qualified router name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "druid.router.fullname" -}} +{{ template "druid.fullname" . }}-{{ .Values.router.name }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/druid/templates/broker/deployment.yaml b/ansible/roles/helm_install/files/druid/templates/broker/deployment.yaml new file mode 100644 index 0000000..ab049dd --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/broker/deployment.yaml @@ -0,0 +1,99 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
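The broker Deployment that follows turns each key under .Values.broker.config into a container environment variable and pulls the shared configVars in through a ConfigMap reference. With the live override values above, the rendered env block would look roughly like this sketch (Helm iterates map keys alphabetically; only two entries shown):

    env:
      - name: DRUID_MAXDIRECTMEMORYSIZE
        value: "12g"
      - name: DRUID_XMX
        value: "8g"
    envFrom:
      - configMapRef:
          name: druid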
+ +*/}} + +{{- if .Values.broker.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "druid.broker.fullname" . }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . }} + component: {{ .Values.broker.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.broker.replicaCount }} + selector: + matchLabels: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.broker.name }} + template: + metadata: + labels: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.broker.name }} + {{- with .Values.broker.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} + {{- end }} + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: [ "broker" ] + env: + {{- range $key, $val := .Values.broker.config }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + envFrom: + - configMapRef: + name: {{ template "druid.name" . }} + ports: + - name: http + containerPort: {{ .Values.broker.port }} + protocol: TCP + {{ if .Values.configVars.druid_emitter_prometheus_port }} + - name: metric + containerPort: {{ .Values.configVars.druid_emitter_prometheus_port }} + protocol: TCP + {{ end }} + livenessProbe: + initialDelaySeconds: 60 + httpGet: + path: /status/health + port: {{ .Values.broker.port }} + readinessProbe: + initialDelaySeconds: 60 + httpGet: + path: /status/health + port: {{ .Values.broker.port }} + resources: +{{ toYaml .Values.broker.resources | indent 12 }} + {{- with .Values.broker.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.broker.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.broker.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml . | indent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/broker/ingress.yaml b/ansible/roles/helm_install/files/druid/templates/broker/ingress.yaml new file mode 100644 index 0000000..df27d7e --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/broker/ingress.yaml @@ -0,0 +1,58 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.broker.ingress.enabled -}} +{{- $fullName := include "druid.broker.fullname" . -}} +{{- $ingressPath := .Values.broker.ingress.path -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . 
}} + component: {{ .Values.broker.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.broker.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.broker.ingress.tls }} + tls: + {{- range .Values.broker.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.broker.ingress.hosts }} + - host: {{ . | quote }} + http: + paths: + - path: {{ $ingressPath }} + backend: + serviceName: {{ $fullName }} + servicePort: http + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/broker/service.yaml b/ansible/roles/helm_install/files/druid/templates/broker/service.yaml new file mode 100644 index 0000000..d409aa1 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/broker/service.yaml @@ -0,0 +1,48 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.broker.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "druid.broker.fullname" . }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . }} + component: {{ .Values.broker.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.broker.serviceType }} + ports: + - port: {{ .Values.broker.port }} + targetPort: http + protocol: TCP + name: http + {{ if .Values.configVars.druid_emitter_prometheus_port }} + - port: {{ .Values.configVars.druid_emitter_prometheus_port }} + targetPort: metric + protocol: TCP + name: metric + {{ end }} + selector: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.broker.name }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/configmap.yaml b/ansible/roles/helm_install/files/druid/templates/configmap.yaml new file mode 100644 index 0000000..85e6203 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/configmap.yaml @@ -0,0 +1,52 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
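Because the live overrides set druid_emitter_prometheus_port: "9000", the component Service templates above add a second named port for metrics scraping alongside HTTP. Assuming the chart's default broker port of 8082 and a ClusterIP serviceType (neither is shown in this hunk), the broker Service would render approximately:

    spec:
      type: ClusterIP
      ports:
        - port: 8082
          targetPort: http
          protocol: TCP
          name: http
        - port: 9000
          targetPort: metric
          protocol: TCP
          name: metric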
+ See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.configMap.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "druid.name" . }} + labels: + app: {{ template "druid.name" . }} + chart: {{ template "druid.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: +{{ toYaml .Values.configVars | indent 2 }} +{{- if .Values.zookeeper.enabled }} + druid_zk_service_host: {{ .Release.Name }}-zookeeper-headless:2181 +{{- else }} + druid_zk_service_host: {{ .Values.zkHosts }} +{{- end }} +{{- if .Values.mysql.enabled }} + druid_metadata_storage_type: mysql + druid_metadata_storage_connector_connectURI: jdbc:mysql://{{ .Release.Name }}-mysql:3306/{{ .Values.mysql.mysqlDatabase}} + druid_metadata_storage_connector_user: {{ .Values.mysql.mysqlUser }} + druid_metadata_storage_connector_password: {{ .Values.mysql.mysqlPassword }} +{{- end }} +{{- if .Values.postgresql.enabled }} + druid_metadata_storage_type: postgresql + druid_metadata_storage_connector_connectURI: jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port}}/{{ .Values.postgresql.postgresqlDatabase }} + druid_metadata_storage_connector_user: {{ .Values.postgresql.postgresqlUsername }} + druid_metadata_storage_connector_password: {{ .Values.postgresql.postgresqlPassword }} +{{- end }} +{{- if .Values.gCloudStorage.enabled }} + GOOGLE_APPLICATION_CREDENTIALS: /var/secrets/google/key.json +{{- end }} +{{- end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/druid/templates/coordinator/deployment.yaml b/ansible/roles/helm_install/files/druid/templates/coordinator/deployment.yaml new file mode 100644 index 0000000..876a3a0 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/coordinator/deployment.yaml @@ -0,0 +1,110 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.coordinator.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "druid.coordinator.fullname" . }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . }} + component: {{ .Values.coordinator.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.coordinator.replicaCount }} + selector: + matchLabels: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.coordinator.name }} + template: + metadata: + labels: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.coordinator.name }} + {{- with .Values.coordinator.podAnnotations }} + annotations: +{{ toYaml . 
| indent 8 }} + {{- end }} + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: [ "coordinator" ] + env: + {{- range $key, $val := .Values.coordinator.config }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + envFrom: + - configMapRef: + name: {{ template "druid.name" . }} + ports: + - name: http + containerPort: {{ .Values.coordinator.port }} + protocol: TCP + {{ if .Values.configVars.druid_emitter_prometheus_port }} + - name: metric + containerPort: {{ .Values.configVars.druid_emitter_prometheus_port }} + protocol: TCP + {{ end }} + livenessProbe: + initialDelaySeconds: 60 + httpGet: + path: /status/health + port: {{ .Values.coordinator.port }} + readinessProbe: + initialDelaySeconds: 60 + httpGet: + path: /status/health + port: {{ .Values.coordinator.port }} + resources: +{{ toYaml .Values.coordinator.resources | indent 12 }} + volumeMounts: + {{- if .Values.gCloudStorage.enabled }} + - name: google-cloud-key + mountPath: /var/secrets/google + {{- end }} + volumes: + {{- if .Values.gCloudStorage.enabled }} + - name: google-cloud-key + secret: + secretName: {{ .Values.gCloudStorage.secretName }} + {{- end }} + {{- with .Values.coordinator.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.coordinator.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.coordinator.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml . | indent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/coordinator/ingress.yaml b/ansible/roles/helm_install/files/druid/templates/coordinator/ingress.yaml new file mode 100644 index 0000000..8d64ea7 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/coordinator/ingress.yaml @@ -0,0 +1,58 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.coordinator.ingress.enabled -}} +{{- $fullName := include "druid.coordinator.fullname" . -}} +{{- $ingressPath := .Values.coordinator.ingress.path -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . }} + component: {{ .Values.coordinator.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.coordinator.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.coordinator.ingress.tls }} + tls: + {{- range .Values.coordinator.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.coordinator.ingress.hosts }} + - host: {{ . | quote }} + http: + paths: + - path: {{ $ingressPath }} + backend: + serviceName: {{ $fullName }} + servicePort: http + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/coordinator/service.yaml b/ansible/roles/helm_install/files/druid/templates/coordinator/service.yaml new file mode 100644 index 0000000..2d72a42 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/coordinator/service.yaml @@ -0,0 +1,48 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.coordinator.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "druid.coordinator.fullname" . }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . }} + component: {{ .Values.coordinator.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.coordinator.serviceType }} + ports: + - port: {{ .Values.coordinator.port }} + targetPort: http + protocol: TCP + name: http + {{ if .Values.configVars.druid_emitter_prometheus_port }} + - port: {{ .Values.configVars.druid_emitter_prometheus_port }} + targetPort: metric + protocol: TCP + name: metric + {{ end }} + selector: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.coordinator.name }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/historical/ingress.yaml b/ansible/roles/helm_install/files/druid/templates/historical/ingress.yaml new file mode 100644 index 0000000..466e3c5 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/historical/ingress.yaml @@ -0,0 +1,58 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.historical.ingress.enabled -}} +{{- $fullName := include "druid.historical.fullname" . 
-}} +{{- $ingressPath := .Values.historical.ingress.path -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . }} + component: {{ .Values.historical.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.historical.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.historical.ingress.tls }} + tls: + {{- range .Values.historical.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.historical.ingress.hosts }} + - host: {{ . | quote }} + http: + paths: + - path: {{ $ingressPath }} + backend: + serviceName: {{ $fullName }} + servicePort: http + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/historical/pdb.yaml b/ansible/roles/helm_install/files/druid/templates/historical/pdb.yaml new file mode 100644 index 0000000..13b0000 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/historical/pdb.yaml @@ -0,0 +1,43 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.historical.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "druid.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: {{ .Values.historical.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "druid.historical.fullname" . }} +spec: +{{- if .Values.historical.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.historical.podDisruptionBudget.minAvailable }} +{{- end }} +{{- if .Values.historical.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.historical.podDisruptionBudget.maxUnavailable }} +{{- end }} + selector: + matchLabels: + app: {{ template "druid.name" . }} + component: {{ .Values.historical.name }} + release: {{ .Release.Name }} +{{- end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/druid/templates/historical/service.yaml b/ansible/roles/helm_install/files/druid/templates/historical/service.yaml new file mode 100644 index 0000000..c34fb90 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/historical/service.yaml @@ -0,0 +1,48 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. 
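If historical.podDisruptionBudget.enabled were switched on with, say, a hypothetical maxUnavailable: 1, the PodDisruptionBudget template above would render roughly as follows for the "druid" release (again assuming the default component name):

    apiVersion: policy/v1beta1
    kind: PodDisruptionBudget
    metadata:
      name: druid-historical
    spec:
      maxUnavailable: 1
      selector:
        matchLabels:
          app: druid
          component: historical
          release: druid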
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.historical.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "druid.historical.fullname" . }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . }} + component: {{ .Values.historical.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.historical.serviceType }} + ports: + - port: {{ .Values.historical.port }} + targetPort: http + protocol: TCP + name: http + {{ if .Values.configVars.druid_emitter_prometheus_port }} + - port: {{ .Values.configVars.druid_emitter_prometheus_port }} + targetPort: metric + protocol: TCP + name: metric + {{ end }} + selector: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.historical.name }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/historical/statefulset.yaml b/ansible/roles/helm_install/files/druid/templates/historical/statefulset.yaml new file mode 100644 index 0000000..7b29903 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/historical/statefulset.yaml @@ -0,0 +1,164 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.historical.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "druid.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: {{ .Values.historical.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "druid.historical.fullname" . }} +spec: + serviceName: {{ template "druid.historical.fullname" . }} + replicas: {{ .Values.historical.replicaCount }} + selector: + matchLabels: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.historical.name }} + template: + metadata: + labels: + app: {{ template "druid.name" . }} + component: {{ .Values.historical.name }} + release: {{ .Release.Name }} + {{- with .Values.historical.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} + {{- end }} + spec: + {{- if or .Values.historical.antiAffinity .Values.historical.nodeAffinity }} + affinity: + {{- end }} + {{- if eq .Values.historical.antiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "druid.name" . 
}}" + release: "{{ .Release.Name }}" + component: "{{ .Values.historical.name }}" + {{- else if eq .Values.historical.antiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "druid.name" . }}" + release: "{{ .Release.Name }}" + component: "{{ .Values.historical.name }}" + {{- end }} + {{- with .Values.historical.nodeAffinity }} + nodeAffinity: +{{ toYaml . | indent 10 }} + {{- end }} +{{- if .Values.historical.nodeSelector }} + nodeSelector: +{{ toYaml .Values.historical.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.historical.securityContext }} + securityContext: +{{ toYaml .Values.historical.securityContext | indent 8 }} +{{- end }} +{{- if .Values.historical.tolerations }} + tolerations: +{{ toYaml .Values.historical.tolerations | indent 8 }} +{{- end }} +{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }} + containers: + - name: druid + args: [ "historical" ] + env: + {{- range $key, $val := .Values.historical.config }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + envFrom: + - configMapRef: + name: {{ template "druid.name" . }} + resources: +{{ toYaml .Values.historical.resources | indent 12 }} + livenessProbe: + initialDelaySeconds: {{ .Values.historical.livenessProbeInitialDelaySeconds }} + httpGet: + path: /status/health + port: {{ .Values.historical.port }} + readinessProbe: + initialDelaySeconds: {{ .Values.historical.readinessProbeInitialDelaySeconds }} + httpGet: + path: /status/health + port: {{ .Values.historical.port }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + ports: + - containerPort: {{ .Values.historical.port }} + name: http + {{ if .Values.configVars.druid_emitter_prometheus_port }} + - name: metric + containerPort: {{ .Values.configVars.druid_emitter_prometheus_port }} + protocol: TCP + {{ end }} + volumeMounts: + - mountPath: /opt/druid/var/druid/ + name: data + {{- if .Values.gCloudStorage.enabled }} + - name: google-cloud-key + mountPath: /var/secrets/google + {{- end }} + volumes: + {{- if not .Values.historical.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.gCloudStorage.enabled }} + - name: google-cloud-key + secret: + secretName: {{ .Values.gCloudStorage.secretName }} + {{- end }} + updateStrategy: + type: {{ .Values.historical.updateStrategy.type }} + {{- if .Values.historical.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - {{ .Values.historical.persistence.accessMode | quote }} + {{- if .Values.historical.persistence.storageClass }} + {{- if (eq "-" .Values.historical.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.historical.persistence.storageClass }}" + {{- end }} + {{- end }} + resources: + requests: + storage: "{{ .Values.historical.persistence.size }}" + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/middleManager/hpa.yaml b/ansible/roles/helm_install/files/druid/templates/middleManager/hpa.yaml new file mode 100644 index 0000000..fcda2f2 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/middleManager/hpa.yaml @@ -0,0 +1,40 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license 
agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.middleManager.autoscaling.enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "druid.middleManager.fullname" . }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . }} + component: "{{ .Values.middleManager.name }}" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ include "druid.middleManager.fullname" . }} + minReplicas: {{ .Values.middleManager.autoscaling.minReplicas }} + maxReplicas: {{ .Values.middleManager.autoscaling.maxReplicas }} + metrics: +{{ toYaml .Values.middleManager.autoscaling.metrics | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/druid/templates/middleManager/ingress.yaml b/ansible/roles/helm_install/files/druid/templates/middleManager/ingress.yaml new file mode 100644 index 0000000..672580a --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/middleManager/ingress.yaml @@ -0,0 +1,58 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.middleManager.ingress.enabled -}} +{{- $fullName := include "druid.middleManager.fullname" . -}} +{{- $ingressPath := .Values.middleManager.ingress.path -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . }} + component: {{ .Values.middleManager.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.middleManager.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.middleManager.ingress.tls }} + tls: + {{- range .Values.middleManager.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.middleManager.ingress.hosts }} + - host: {{ . 
| quote }} + http: + paths: + - path: {{ $ingressPath }} + backend: + serviceName: {{ $fullName }} + servicePort: http + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/middleManager/pdb.yaml b/ansible/roles/helm_install/files/druid/templates/middleManager/pdb.yaml new file mode 100644 index 0000000..5e3bccf --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/middleManager/pdb.yaml @@ -0,0 +1,43 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.middleManager.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "druid.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: {{ .Values.middleManager.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "druid.middleManager.fullname" . }} +spec: +{{- if .Values.middleManager.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.middleManager.podDisruptionBudget.minAvailable }} +{{- end }} +{{- if .Values.middleManager.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.middleManager.podDisruptionBudget.maxUnavailable }} +{{- end }} + selector: + matchLabels: + app: {{ template "druid.name" . }} + component: {{ .Values.middleManager.name }} + release: {{ .Release.Name }} +{{- end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/druid/templates/middleManager/service.yaml b/ansible/roles/helm_install/files/druid/templates/middleManager/service.yaml new file mode 100644 index 0000000..2d70ead --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/middleManager/service.yaml @@ -0,0 +1,48 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.middleManager.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "druid.middleManager.fullname" . }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . 
}} + component: {{ .Values.middleManager.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.middleManager.serviceType }} + ports: + - port: {{ .Values.middleManager.port }} + targetPort: http + protocol: TCP + name: http + {{ if .Values.configVars.druid_emitter_prometheus_port }} + - port: {{ .Values.configVars.druid_emitter_prometheus_port }} + targetPort: metric + protocol: TCP + name: metric + {{ end }} + selector: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.middleManager.name }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/middleManager/statefulset.yaml b/ansible/roles/helm_install/files/druid/templates/middleManager/statefulset.yaml new file mode 100644 index 0000000..05a8ac6 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/middleManager/statefulset.yaml @@ -0,0 +1,164 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.middleManager.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "druid.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: {{ .Values.middleManager.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "druid.middleManager.fullname" . }} +spec: + serviceName: {{ template "druid.middleManager.fullname" . }} + replicas: {{ .Values.middleManager.replicaCount }} + selector: + matchLabels: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.middleManager.name }} + template: + metadata: + labels: + app: {{ template "druid.name" . }} + component: {{ .Values.middleManager.name }} + release: {{ .Release.Name }} + {{- if .Values.middleManager.podAnnotations }} + annotations: +{{ toYaml .Values.middleManager.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if or .Values.middleManager.antiAffinity .Values.middleManager.nodeAffinity }} + affinity: + {{- end }} + {{- if eq .Values.middleManager.antiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "druid.name" . }}" + release: "{{ .Release.Name }}" + component: "{{ .Values.middleManager.name }}" + {{- else if eq .Values.middleManager.antiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "druid.name" . }}" + release: "{{ .Release.Name }}" + component: "{{ .Values.middleManager.name }}" + {{- end }} + {{- with .Values.middleManager.nodeAffinity }} + nodeAffinity: +{{ toYaml . 
| indent 10 }} + {{- end }} +{{- if .Values.middleManager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.middleManager.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.middleManager.securityContext }} + securityContext: +{{ toYaml .Values.middleManager.securityContext | indent 8 }} +{{- end }} +{{- if .Values.middleManager.tolerations }} + tolerations: +{{ toYaml .Values.middleManager.tolerations | indent 8 }} +{{- end }} +{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }} + containers: + - name: druid + args: [ "middleManager" ] + env: + {{- range $key, $val := .Values.middleManager.config }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + envFrom: + - configMapRef: + name: {{ template "druid.name" . }} + resources: +{{ toYaml .Values.middleManager.resources | indent 12 }} + livenessProbe: + initialDelaySeconds: 60 + httpGet: + path: /status/health + port: {{ .Values.middleManager.port }} + readinessProbe: + initialDelaySeconds: 60 + httpGet: + path: /status/health + port: {{ .Values.middleManager.port }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + ports: + - containerPort: {{ .Values.middleManager.port }} + name: http + {{ if .Values.configVars.druid_emitter_prometheus_port }} + - name: metric + containerPort: {{ .Values.configVars.druid_emitter_prometheus_port }} + protocol: TCP + {{ end }} + volumeMounts: + - mountPath: /opt/druid/var/druid/ + name: data + {{- if .Values.gCloudStorage.enabled }} + - name: google-cloud-key + mountPath: /var/secrets/google + {{- end }} + volumes: + {{- if not .Values.middleManager.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.gCloudStorage.enabled }} + - name: google-cloud-key + secret: + secretName: {{ .Values.gCloudStorage.secretName }} + {{- end }} + updateStrategy: + type: {{ .Values.middleManager.updateStrategy.type }} + {{- if .Values.middleManager.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - {{ .Values.middleManager.persistence.accessMode | quote }} + {{- if .Values.middleManager.persistence.storageClass }} + {{- if (eq "-" .Values.middleManager.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.middleManager.persistence.storageClass }}" + {{- end }} + {{- end }} + resources: + requests: + storage: "{{ .Values.middleManager.persistence.size }}" + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/overlord/deployment.yaml b/ansible/roles/helm_install/files/druid/templates/overlord/deployment.yaml new file mode 100644 index 0000000..2484beb --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/overlord/deployment.yaml @@ -0,0 +1,105 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.overlord.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "druid.overlord.fullname" . }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . }} + component: {{ .Values.overlord.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.overlord.replicaCount }} + selector: + matchLabels: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.overlord.name }} + template: + metadata: + labels: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.overlord.name }} + {{- with .Values.overlord.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} + {{- end }} + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: [ "overlord" ] + env: + {{- range $key, $val := .Values.overlord.config }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + envFrom: + - configMapRef: + name: {{ template "druid.name" . }} + ports: + - name: http + containerPort: {{ .Values.overlord.port }} + protocol: TCP + livenessProbe: + initialDelaySeconds: 60 + httpGet: + path: /status/health + port: {{ .Values.overlord.port }} + readinessProbe: + initialDelaySeconds: 60 + httpGet: + path: /status/health + port: {{ .Values.overlord.port }} + resources: +{{ toYaml .Values.overlord.resources | indent 12 }} + volumeMounts: + {{- if .Values.gCloudStorage.enabled }} + - name: google-cloud-key + mountPath: /var/secrets/google + {{- end }} + volumes: + {{- if .Values.gCloudStorage.enabled }} + - name: google-cloud-key + secret: + secretName: {{ .Values.gCloudStorage.secretName }} + {{- end }} + {{- with .Values.overlord.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.overlord.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.overlord.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml . | indent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/overlord/ingress.yaml b/ansible/roles/helm_install/files/druid/templates/overlord/ingress.yaml new file mode 100644 index 0000000..f1f3051 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/overlord/ingress.yaml @@ -0,0 +1,58 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.overlord.ingress.enabled -}} +{{- $fullName := include "druid.overlord.fullname" . -}} +{{- $ingressPath := .Values.overlord.ingress.path -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . }} + component: {{ .Values.overlord.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.overlord.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.overlord.ingress.tls }} + tls: + {{- range .Values.overlord.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.overlord.ingress.hosts }} + - host: {{ . | quote }} + http: + paths: + - path: {{ $ingressPath }} + backend: + serviceName: {{ $fullName }} + servicePort: http + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/overlord/service.yaml b/ansible/roles/helm_install/files/druid/templates/overlord/service.yaml new file mode 100644 index 0000000..6a9b856 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/overlord/service.yaml @@ -0,0 +1,42 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.overlord.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "druid.overlord.fullname" . }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . }} + component: {{ .Values.overlord.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.overlord.serviceType }} + ports: + - port: {{ .Values.overlord.port }} + targetPort: http + protocol: TCP + name: http + selector: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.overlord.name }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/router/deployment.yaml b/ansible/roles/helm_install/files/druid/templates/router/deployment.yaml new file mode 100644 index 0000000..4a759d6 --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/router/deployment.yaml @@ -0,0 +1,99 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. 
See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/}} + +{{- if .Values.router.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "druid.router.fullname" . }} + labels: + app: {{ include "druid.name" . }} + chart: {{ include "druid.chart" . }} + component: {{ .Values.router.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.router.replicaCount }} + selector: + matchLabels: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.router.name }} + template: + metadata: + labels: + app: {{ include "druid.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.router.name }} + {{- with .Values.router.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} + {{- end }} + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: [ "router" ] + env: + {{- range $key, $val := .Values.router.config }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + envFrom: + - configMapRef: + name: {{ template "druid.name" . }} + ports: + - name: http + containerPort: {{ .Values.router.port }} + protocol: TCP + {{ if .Values.configVars.druid_emitter_prometheus_port }} + - name: metric + containerPort: {{ .Values.configVars.druid_emitter_prometheus_port }} + protocol: TCP + {{ end }} + livenessProbe: + initialDelaySeconds: 60 + httpGet: + path: /status/health + port: {{ .Values.router.port }} + readinessProbe: + initialDelaySeconds: 60 + httpGet: + path: /status/health + port: {{ .Values.router.port }} + resources: +{{ toYaml .Values.router.resources | indent 12 }} + {{- with .Values.router.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.router.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.router.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml . | indent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/druid/templates/router/ingress.yaml b/ansible/roles/helm_install/files/druid/templates/router/ingress.yaml new file mode 100644 index 0000000..aab27fc --- /dev/null +++ b/ansible/roles/helm_install/files/druid/templates/router/ingress.yaml @@ -0,0 +1,58 @@ +{{/* + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. 
You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+
+*/}}
+
+{{- if .Values.router.ingress.enabled -}}
+{{- $fullName := include "druid.router.fullname" . -}}
+{{- $ingressPath := .Values.router.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+    app: {{ include "druid.name" . }}
+    chart: {{ include "druid.chart" . }}
+    component: {{ .Values.router.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+{{- with .Values.router.ingress.annotations }}
+  annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+{{- if .Values.router.ingress.tls }}
+  tls:
+  {{- range .Values.router.ingress.tls }}
+    - hosts:
+      {{- range .hosts }}
+        - {{ . | quote }}
+      {{- end }}
+      secretName: {{ .secretName }}
+  {{- end }}
+{{- end }}
+  rules:
+  {{- range .Values.router.ingress.hosts }}
+    - host: {{ . | quote }}
+      http:
+        paths:
+          - path: {{ $ingressPath }}
+            backend:
+              serviceName: {{ $fullName }}
+              servicePort: http
+  {{- end }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/druid/templates/router/service.yaml b/ansible/roles/helm_install/files/druid/templates/router/service.yaml
new file mode 100644
index 0000000..21bea3f
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/templates/router/service.yaml
@@ -0,0 +1,53 @@
+{{/*
+
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License. You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+
+*/}}
+
+{{- if .Values.router.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "druid.router.fullname" . }}
+  labels:
+    app: {{ include "druid.name" . }}
+    chart: {{ include "druid.chart" . }}
+    component: {{ .Values.router.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  type: {{ .Values.router.serviceType }}
+  ports:
+    - port: {{ .Values.router.port }}
+      {{- if .Values.router.nodePort }}
+      ## nodePort is rendered only when explicitly set (it is not defined in
+      ## values.yaml), so the default ClusterIP service stays valid.
+      nodePort: {{ .Values.router.nodePort }}
+      {{- end }}
+      targetPort: http
+      protocol: TCP
+      name: http
+  {{ if .Values.configVars.druid_emitter_prometheus_port }}
+    - port: {{ .Values.configVars.druid_emitter_prometheus_port }}
+      targetPort: metric
+      protocol: TCP
+      name: metric
+  {{ end }}
+  selector:
+    app: {{ include "druid.name" . }}
+    release: {{ .Release.Name }}
+    component: {{ .Values.router.name }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/druid/templates/secrets.yaml b/ansible/roles/helm_install/files/druid/templates/secrets.yaml
new file mode 100644
index 0000000..5245180
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/templates/secrets.yaml
@@ -0,0 +1,30 @@
+{{/*
+
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License. You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+
+*/}}
+
+{{- if .Values.gCloudStorage.enabled -}}
+apiVersion: v1
+kind: Secret
+metadata:
+  ## Use the configurable secret name so the volume mounts in the other
+  ## templates (which reference .Values.gCloudStorage.secretName) always match
+  name: {{ .Values.gCloudStorage.secretName }}
+type: Opaque
+data:
+  key.json: {{ .Values.google.gcsAPIKey }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/druid/values.yaml b/ansible/roles/helm_install/files/druid/values.yaml
new file mode 100644
index 0000000..000cfbf
--- /dev/null
+++ b/ansible/roles/helm_install/files/druid/values.yaml
@@ -0,0 +1,434 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default values for druid.
+
+image:
+  repository: apache/druid
+  tag: 0.23.0
+  pullPolicy: IfNotPresent
+  pullSecrets: []
+
+configMap:
+  ## If false, configMap will not be applied
+  ##
+  enabled: true
+
+## Define the key value pairs in the configmap
+configVars:
+  ## DRUID env vars. ref: https://github.com/apache/druid/blob/master/distribution/docker/druid.sh#L29
+  # DRUID_LOG_LEVEL: "warn"
+  # DRUID_LOG4J:
+  DRUID_USE_CONTAINER_IP: "true"
+
+  ## Druid Common Configurations. ref: https://druid.apache.org/docs/latest/configuration/index.html#common-configurations
+  druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage"]'
+  druid_metadata_storage_type: postgresql
+  druid_metadata_storage_connector_connectURI: jdbc:postgresql://postgres:5432/druid
+  druid_metadata_storage_connector_user: druid
+  druid_metadata_storage_connector_password: druid
+  druid_storage_type: local
+  druid_indexer_logs_type: file
+  druid_indexer_logs_directory: /opt/data/indexing-logs
+
+  ## Druid Emitting Metrics.
ref: https://druid.apache.org/docs/latest/configuration/index.html#emitting-metrics + druid_emitter: noop + druid_emitter_logging_logLevel: debug + druid_emitter_http_recipientBaseUrl: http://druid_exporter_url:druid_exporter_port/druid + +gCloudStorage: + enabled: false + secretName: google-cloud-key + +broker: + ## If false, broker will not be installed + ## + enabled: true + name: broker + replicaCount: 1 + port: 8082 + serviceType: ClusterIP + + config: + DRUID_XMX: 512m + DRUID_XMS: 512m + DRUID_MAXDIRECTMEMORYSIZE: 400m + druid_processing_buffer_sizeBytes: '50000000' + druid_processing_numMergeBuffers: 2 + druid_processing_numThreads: 1 + # druid_monitoring_monitors: '["org.apache.druid.client.cache.CacheMonitor", "org.apache.druid.server.metrics.QueryCountStatsMonitor"]' + + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + resources: {} + # limits: + # cpu: 1 + # memory: 1Gi + # requests: + # cpu: 250m + # memory: 512Mi + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + podAnnotations: {} + +coordinator: + ## If false, coordinator will not be installed + ## + enabled: true + name: coordinator + replicaCount: 1 + port: 8081 + serviceType: ClusterIP + + config: + DRUID_XMX: 256m + DRUID_XMS: 256m + # druid_monitoring_monitors: '["org.apache.druid.server.metrics.TaskCountStatsMonitor"]' + + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + resources: {} + # limits: + # cpu: 500m + # memory: 1Gi + # requests: + # cpu: 250m + # memory: 512Mi + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + podAnnotations: {} + +overlord: + ## If true, the separate overlord will be installed + ## + enabled: false + name: overlord + replicaCount: 1 + port: 8081 + serviceType: ClusterIP + + javaOpts: "-Xms1G -Xmx1G" + + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + resources: {} + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + podAnnotations: {} + +historical: + ## If false, historical will not be installed + ## + enabled: true + name: historical + replicaCount: 1 + port: 8083 + serviceType: ClusterIP + + config: + DRUID_XMX: 512m + DRUID_XMS: 512m + DRUID_MAXDIRECTMEMORYSIZE: 400m + druid_processing_buffer_sizeBytes: '50000000' + druid_processing_numMergeBuffers: 2 + druid_processing_numThreads: 1 + # druid_monitoring_monitors: '["org.apache.druid.client.cache.CacheMonitor", "org.apache.druid.server.metrics.HistoricalMetricsMonitor", "org.apache.druid.server.metrics.QueryCountStatsMonitor"]' + # druid_segmentCache_locations: '[{"path":"/var/druid/segment-cache","maxSize":300000000000}]' + + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + persistence: + enabled: true + accessMode: ReadWriteOnce + size: "4Gi" + # storageClass: "ssd" + + antiAffinity: "soft" + + nodeAffinity: {} + + 
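+  ## Hypothetical example (commented out): pin historicals to nodes carrying a
+  ## label such as druid/role=historical, which you would have to apply to the
+  ## nodes yourself. The dict is inserted verbatim under the pod's
+  ## affinity.nodeAffinity field by the statefulset template.
+  # nodeAffinity:
+  #   requiredDuringSchedulingIgnoredDuringExecution:
+  #     nodeSelectorTerms:
+  #       - matchExpressions:
+  #           - key: druid/role
+  #             operator: In
+  #             values:
+  #               - historical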
nodeSelector: {} + + securityContext: + fsGroup: 1000 + + tolerations: [] + + resources: {} + # limits: + # cpu: 2 + # memory: 2Gi + # requests: + # cpu: 500m + # memory: 512Mi + + livenessProbeInitialDelaySeconds: 60 + readinessProbeInitialDelaySeconds: 60 + + ## (dict) If specified, apply these annotations to each master Pod + podAnnotations: {} + + podDisruptionBudget: + enabled: false + # minAvailable: 2 + maxUnavailable: 1 + + updateStrategy: + type: RollingUpdate + +middleManager: + ## If false, middleManager will not be installed + ## + enabled: true + name: middle-manager + replicaCount: 1 + port: 8091 + serviceType: ClusterIP + + config: + DRUID_XMX: 64m + DRUID_XMS: 64m + druid_indexer_runner_javaOptsArray: '["-server", "-Xms256m", "-Xmx256m", "-XX:MaxDirectMemorySize=300m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-XX:+ExitOnOutOfMemoryError", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]' + druid_indexer_fork_property_druid_processing_buffer_sizeBytes: '25000000' + + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 5 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 60 + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 60 + + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + persistence: + enabled: true + accessMode: ReadWriteOnce + size: "4Gi" + # storageClass: "ssd" + + antiAffinity: "soft" + + nodeAffinity: {} + + nodeSelector: {} + + securityContext: + fsGroup: 1000 + + tolerations: [] + + resources: {} + # limits: + # cpu: 500m + # memory: 1Gi + # requests: + # cpu: 250m + # memory: 256Mi + + ## (dict) If specified, apply these annotations to each master Pod + podAnnotations: {} + + podDisruptionBudget: + enabled: false + # minAvailable: 2 + maxUnavailable: 1 + + updateStrategy: + type: RollingUpdate + +router: + ## If false, router will not be installed + ## + enabled: true + name: router + replicaCount: 1 + port: 8888 + serviceType: ClusterIP + + config: + DRUID_XMX: 128m + DRUID_XMS: 128m + DRUID_MAXDIRECTMEMORYSIZE: 128m + + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + resources: {} + # limits: + # cpu: 250m + # memory: 256Mi + # requests: + # cpu: 100m + # memory: 128Mi + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + podAnnotations: {} + +# ------------------------------------------------------------------------------ +# Zookeeper: +# ------------------------------------------------------------------------------ + +# zkHosts: druid-zookeeper-headless:2181 + +zookeeper: + enabled: true + ## Environmental variables to set in Zookeeper + ## + env: + ## The JVM heap size to allocate to Zookeeper + ZK_HEAP_SIZE: "512M" + ## Configure Zookeeper headless + headless: + publishNotReadyAddresses: true + + +# ------------------------------------------------------------------------------ +# MySQL: +# ------------------------------------------------------------------------------ +mysql: + enabled: false + mysqlRootPassword: druidroot + mysqlUser: druid + mysqlPassword: druid + mysqlDatabase: druid + configurationFiles: + 
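+    ## Each key below becomes a configuration file mounted into the MySQL
+    ## container (under /etc/mysql/conf.d in the bundled mysql chart's default
+    ## layout), so further .cnf files can be added alongside this one.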
mysql_collate.cnf: |- + [mysqld] + character-set-server=utf8 + collation-server=utf8_unicode_ci + +# ------------------------------------------------------------------------------ +# postgres: +# ------------------------------------------------------------------------------ +postgresql: + enabled: true + postgresqlUsername: druid + postgresqlPassword: druid + postgresqlDatabase: druid + service: + port: 5432 + +# Secrets diff --git a/ansible/roles/helm_install/files/elasticsearch/.helmignore b/ansible/roles/helm_install/files/elasticsearch/.helmignore new file mode 100644 index 0000000..e12c0b4 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/.helmignore @@ -0,0 +1,2 @@ +tests/ +.pytest_cache/ diff --git a/ansible/roles/helm_install/files/elasticsearch/Chart.yaml b/ansible/roles/helm_install/files/elasticsearch/Chart.yaml new file mode 100644 index 0000000..0d10900 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +description: Official Elastic helm chart for Elasticsearch +home: https://github.com/elastic/helm-charts +maintainers: + - email: helm-charts@elastic.co + name: Elastic +name: elasticsearch +version: 8.4.1 +appVersion: 8.4.1 +sources: + - https://github.com/elastic/elasticsearch +icon: https://helm.elastic.co/icons/elasticsearch.png diff --git a/ansible/roles/helm_install/files/elasticsearch/Makefile b/ansible/roles/helm_install/files/elasticsearch/Makefile new file mode 100644 index 0000000..22218a1 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/Makefile @@ -0,0 +1 @@ +include ../helpers/common.mk diff --git a/ansible/roles/helm_install/files/elasticsearch/README.md b/ansible/roles/helm_install/files/elasticsearch/README.md new file mode 100644 index 0000000..6136087 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/README.md @@ -0,0 +1,465 @@ +# Elasticsearch Helm Chart + +[![Build Status](https://img.shields.io/jenkins/s/https/devops-ci.elastic.co/job/elastic+helm-charts+main.svg)](https://devops-ci.elastic.co/job/elastic+helm-charts+main/) [![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/elastic)](https://artifacthub.io/packages/search?repo=elastic) + +This Helm chart is a lightweight way to configure and run our official +[Elasticsearch Docker image][]. + + +**Warning**: This branch is used for development, please use the latest [7.x][] release for released version. 
+
+
+
+
+- [Requirements](#requirements)
+- [Installing](#installing)
+  - [Install a released version using the Helm repository](#install-a-released-version-using-the-helm-repository)
+  - [Install a development version using the main branch](#install-a-development-version-using-the-main-branch)
+- [Upgrading](#upgrading)
+- [Usage notes](#usage-notes)
+- [Configuration](#configuration)
+- [FAQ](#faq)
+  - [How to deploy this chart on a specific K8S distribution?](#how-to-deploy-this-chart-on-a-specific-k8s-distribution)
+  - [How to deploy dedicated nodes types?](#how-to-deploy-dedicated-nodes-types)
+    - [Coordinating nodes](#coordinating-nodes)
+    - [Clustering and Node Discovery](#clustering-and-node-discovery)
+  - [How to deploy clusters with security (authentication and TLS) enabled?](#how-to-deploy-clusters-with-security-authentication-and-tls-enabled)
+  - [How to migrate from helm/charts stable chart?](#how-to-migrate-from-helmcharts-stable-chart)
+  - [How to install plugins?](#how-to-install-plugins)
+  - [How to use the keystore?](#how-to-use-the-keystore)
+    - [Basic example](#basic-example)
+    - [Multiple keys](#multiple-keys)
+    - [Custom paths and keys](#custom-paths-and-keys)
+  - [How to enable snapshotting?](#how-to-enable-snapshotting)
+  - [How to configure templates post-deployment?](#how-to-configure-templates-post-deployment)
+- [Contributing](#contributing)
+
+
+
+
+
+## Requirements
+
+* Minimum cluster requirements include the following to run this chart with
+default settings. All of these settings are configurable.
+  * Three Kubernetes nodes to respect the default "hard" affinity settings
+  * 1GB of RAM for the JVM heap
+
+See [supported configurations][] for more details.
+
+
+## Installing
+
+### Install a released version using the Helm repository
+
+* Add the Elastic Helm charts repo:
+`helm repo add elastic https://helm.elastic.co`
+
+* Install it: `helm install elasticsearch elastic/elasticsearch`
+
+### Install a development version using the main branch
+
+* Clone the git repo: `git clone git@github.com:elastic/helm-charts.git`
+
+* Install it: `helm install elasticsearch ./helm-charts/elasticsearch --set imageTag=8.4.1`
+
+## Upgrading
+
+Please always check [CHANGELOG.md][] and [BREAKING_CHANGES.md][] before
+upgrading to a new chart version.
+
+
+## Usage notes
+
+* This repo includes several [examples][] of configurations that can be used
+as a reference. They are also used in the automated testing of this chart.
+* Automated testing of this chart is currently only run against GKE (Google
+Kubernetes Engine).
+* The chart deploys a StatefulSet and by default will do an automated rolling
+update of your cluster. It does this by waiting for the cluster health to become
+green after each instance is updated. If you prefer to update manually you can
+set `OnDelete` [updateStrategy][].
+* It is important to set the JVM heap size in `esJavaOpts` and the CPU/Memory
+`resources` to values suitable for your cluster.
+* To keep the chart simple and maintainable, each set of node groups is deployed
+as a separate Helm release. Take a look at the [multi][] example to get an idea
+for how this works. Without doing this it isn't possible to resize persistent
+volumes in a StatefulSet. By setting it up this way it makes it possible to add
+more nodes with a new storage size and then drain the old ones. It also solves
+the problem of allowing the user to determine which node groups to update first
+when doing upgrades or changes.
+* We have designed this chart to be very un-opinionated about how to configure +Elasticsearch. It exposes ways to set environment variables and mount secrets +inside of the container. Doing this makes it much easier for this chart to +support multiple versions with minimal changes. + + +## Configuration + +| Parameter | Description | Default | +|------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------| +| `antiAffinityTopologyKey` | The [anti-affinity][] topology key. By default this will prevent multiple Elasticsearch nodes from running on the same Kubernetes node | `kubernetes.io/hostname` | +| `antiAffinity` | Setting this to hard enforces the [anti-affinity][] rules. If it is set to soft it will be done "best effort". Other values will be ignored | `hard` | +| `clusterHealthCheckParams` | The [Elasticsearch cluster health status params][] that will be used by readiness [probe][] command | `wait_for_status=green&timeout=1s` | +| `clusterName` | This will be used as the Elasticsearch [cluster.name][] and should be unique per cluster in the namespace | `elasticsearch` | +| `createCert` | This will automatically create the SSL certificates | `true` | +| `enableServiceLinks` | Set to false to disabling service links, which can cause slow pod startup times when there are many services in the current namespace. | `true` | +| `envFrom` | Templatable string to be passed to the [environment from variables][] which will be appended to the `envFrom:` definition for the container | `[]` | +| `esConfig` | Allows you to add any config files in `/usr/share/elasticsearch/config/` such as `elasticsearch.yml` and `log4j2.properties`. See [values.yaml][] for an example of the formatting | `{}` | +| `esJavaOpts` | [Java options][] for Elasticsearch. This is where you could configure the [jvm heap size][] | `""` | +| `esJvmOptions` | [Java options][] for Elasticsearch. Override the default JVM options by adding custom options files . See [values.yaml][] for an example of the formatting | `{}` | +| `esMajorVersion` | Deprecated. Instead, use the version of the chart corresponding to your ES minor version. Used to set major version specific configuration. If you are using a custom image and not running the default Elasticsearch version you will need to set this to the version you are running (e.g. `esMajorVersion: 6`) | `""` | +| `extraContainers` | Templatable string of additional `containers` to be passed to the `tpl` function | `""` | +| `extraEnvs` | Extra [environment variables][] which will be appended to the `env:` definition for the container | `[]` | +| `extraInitContainers` | Templatable string of additional `initContainers` to be passed to the `tpl` function | `""` | +| `extraVolumeMounts` | Templatable string of additional `volumeMounts` to be passed to the `tpl` function | `""` | +| `extraVolumes` | Templatable string of additional `volumes` to be passed to the `tpl` function | `""` | +| `fullnameOverride` | Overrides the `clusterName` and `nodeGroup` when used in the naming of resources. 
This should only be used when using a single `nodeGroup`, otherwise you will have name conflicts | `""` | +| `healthNameOverride` | Overrides `test-elasticsearch-health` pod name | `""` | +| `hostAliases` | Configurable [hostAliases][] | `[]` | +| `httpPort` | The http port that Kubernetes will use for the healthchecks and the service. If you change this you will also need to set [http.port][] in `extraEnvs` | `9200` | +| `imagePullPolicy` | The Kubernetes [imagePullPolicy][] value | `IfNotPresent` | +| `imagePullSecrets` | Configuration for [imagePullSecrets][] so that you can use a private registry for your image | `[]` | +| `imageTag` | The Elasticsearch Docker image tag | `8.4.1` | +| `image` | The Elasticsearch Docker image | `docker.elastic.co/elasticsearch/elasticsearch` | +| `ingress` | Configurable [ingress][] to expose the Elasticsearch service. See [values.yaml][] for an example | see [values.yaml][] | +| `initResources` | Allows you to set the [resources][] for the `initContainer` in the StatefulSet | `{}` | +| `keystore` | Allows you map Kubernetes secrets into the keystore. See the [config example][] and [how to use the keystore][] | `[]` | +| `labels` | Configurable [labels][] applied to all Elasticsearch pods | `{}` | +| `lifecycle` | Allows you to add [lifecycle hooks][]. See [values.yaml][] for an example of the formatting | `{}` | +| `masterService` | The service name used to connect to the masters. You only need to set this if your master `nodeGroup` is set to something other than `master`. See [Clustering and Node Discovery][] for more information | `""` | +| `maxUnavailable` | The [maxUnavailable][] value for the pod disruption budget. By default this will prevent Kubernetes from having more than 1 unhealthy pod in the node group | `1` | +| `minimumMasterNodes` | The value for [discovery.zen.minimum_master_nodes][]. Should be set to `(master_eligible_nodes / 2) + 1`. Ignored in Elasticsearch versions >= 7 | `2` | +| `nameOverride` | Overrides the `clusterName` when used in the naming of resources | `""` | +| `networkHost` | Value for the [network.host Elasticsearch setting][] | `0.0.0.0` | +| `networkPolicy` | The [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) to set. See [`values.yaml`](./values.yaml) for an example | `{http.enabled: false,transport.enabled: false}` | +| `nodeAffinity` | Value for the [node affinity settings][] | `{}` | +| `nodeGroup` | This is the name that will be used for each group of nodes in the cluster. The name will be `clusterName-nodeGroup-X` , `nameOverride-nodeGroup-X` if a `nameOverride` is specified, and `fullnameOverride-X` if a `fullnameOverride` is specified | `master` | +| `nodeSelector` | Configurable [nodeSelector][] so that you can target specific nodes for your Elasticsearch cluster | `{}` | +| `persistence` | Enables a persistent volume for Elasticsearch data. Can be disabled for nodes that only have [roles][] which don't require persistent data | see [values.yaml][] | +| `podAnnotations` | Configurable [annotations][] applied to all Elasticsearch pods | `{}` | +| `podManagementPolicy` | By default Kubernetes [deploys StatefulSets serially][]. This deploys them in parallel so that they can discover each other | `Parallel` | +| `podSecurityContext` | Allows you to set the [securityContext][] for the pod | see [values.yaml][] | +| `podSecurityPolicy` | Configuration for create a pod security policy with minimal permissions to run this Helm chart with `create: true`. 
Also can be used to reference an external pod security policy with `name: "externalPodSecurityPolicy"` | see [values.yaml][] | +| `priorityClassName` | The name of the [PriorityClass][]. No default is supplied as the PriorityClass must be created first | `""` | +| `protocol` | The protocol that will be used for the readiness [probe][]. Change this to `https` if you have `xpack.security.http.ssl.enabled` set | `http` | +| `rbac` | Configuration for creating a role, role binding and ServiceAccount as part of this Helm chart with `create: true`. Also can be used to reference an external ServiceAccount with `serviceAccountName: "externalServiceAccountName"`, or automount the service account token | see [values.yaml][] | +| `readinessProbe` | Configuration fields for the readiness [probe][] | see [values.yaml][] | +| `replicas` | Kubernetes replica count for the StatefulSet (i.e. how many pods) | `3` | +| `resources` | Allows you to set the [resources][] for the StatefulSet | see [values.yaml][] | +| `roles` | A list with the specific [roles][] for the `nodeGroup` | see [values.yaml][] | +| `schedulerName` | Name of the [alternate scheduler][] | `""` | +| `secret.enabled` | Enable Secret creation for Elasticsearch credentials | `true` | +| `secret.password` | Initial password for the elastic user | `""` (generated randomly) | +| `secretMounts` | Allows you easily mount a secret as a file inside the StatefulSet. Useful for mounting certificates and other secrets. See [values.yaml][] for an example | `[]` | +| `securityContext` | Allows you to set the [securityContext][] for the container | see [values.yaml][] | +| `service.annotations` | [LoadBalancer annotations][] that Kubernetes will use for the service. This will configure load balancer if `service.type` is `LoadBalancer` | `{}` | +| `service.enabled` | Enable non-headless service | `true` | +| `service.externalTrafficPolicy` | Some cloud providers allow you to specify the [LoadBalancer externalTrafficPolicy][]. Kubernetes will use this to preserve the client source IP. This will configure load balancer if `service.type` is `LoadBalancer` | `""` | +| `service.httpPortName` | The name of the http port within the service | `http` | +| `service.labelsHeadless` | Labels to be added to headless service | `{}` | +| `service.labels` | Labels to be added to non-headless service | `{}` | +| `service.loadBalancerIP` | Some cloud providers allow you to specify the [loadBalancer][] IP. If the `loadBalancerIP` field is not specified, the IP is dynamically assigned. If you specify a `loadBalancerIP` but your cloud provider does not support the feature, it is ignored. 
| `""` | +| `service.loadBalancerSourceRanges` | The IP ranges that are allowed to access | `[]` | +| `service.nodePort` | Custom [nodePort][] port that can be set if you are using `service.type: nodePort` | `""` | +| `service.transportPortName` | The name of the transport port within the service | `transport` | +| `service.publishNotReadyAddresses` | Consider that all endpoints are considered "ready" even if the Pods themselves are not | `false` | +| `service.type` | Elasticsearch [Service Types][] | `ClusterIP` | +| `sysctlInitContainer` | Allows you to disable the `sysctlInitContainer` if you are setting [sysctl vm.max_map_count][] with another method | `enabled: true` | +| `sysctlVmMaxMapCount` | Sets the [sysctl vm.max_map_count][] needed for Elasticsearch | `262144` | +| `terminationGracePeriod` | The [terminationGracePeriod][] in seconds used when trying to stop the pod | `120` | +| `tests.enabled` | Enable creating test related resources when running `helm template` or `helm test` | `true` | +| `tolerations` | Configurable [tolerations][] | `[]` | +| `transportPort` | The transport port that Kubernetes will use for the service. If you change this you will also need to set [transport port configuration][] in `extraEnvs` | `9300` | +| `updateStrategy` | The [updateStrategy][] for the StatefulSet. By default Kubernetes will wait for the cluster to be green after upgrading each pod. Setting this to `OnDelete` will allow you to manually delete each pod during upgrades | `RollingUpdate` | +| `volumeClaimTemplate` | Configuration for the [volumeClaimTemplate for StatefulSets][]. You will want to adjust the storage (default `30Gi` ) and the `storageClassName` if you are using a different storage class | see [values.yaml][] | + + +## FAQ + +### How to deploy this chart on a specific K8S distribution? + +This chart is designed to run on production scale Kubernetes clusters with +multiple nodes, lots of memory and persistent storage. For that reason it can be +a bit tricky to run them against local Kubernetes environments such as +[Minikube][]. + +This chart is highly tested with [GKE][], but some K8S distribution also +requires specific configurations. + +We provide examples of configuration for the following K8S providers: + +- [Docker for Mac][] +- [KIND][] +- [Minikube][] +- [MicroK8S][] +- [OpenShift][] + +### How to deploy dedicated nodes types? + +All the Elasticsearch pods deployed share the same configuration. If you need to +deploy dedicated [nodes types][] (for example dedicated master and data nodes), +you can deploy multiple releases of this chart with different configurations +while they share the same `clusterName` value. + +For each Helm release, the nodes types can then be defined using `roles` value. + +An example of Elasticsearch cluster using 2 different Helm releases for master, +data and coordinating nodes can be found in [examples/multi][]. + +#### Coordinating nodes + +Every node is implicitly a coordinating node. This means that a node that has an +explicit empty list of roles will only act as a coordinating node. 
+
+When deploying a coordinating-only node with the Elasticsearch chart, you need
+to define an empty list of roles in both the `roles` value and the `node.roles`
+setting:
+
+```yaml
+roles: []
+
+esConfig:
+  elasticsearch.yml: |
+    node.roles: []
+```
+
+More details in [#1186 (comment)][]
+
+#### Clustering and Node Discovery
+
+This chart facilitates Elasticsearch node discovery and services by creating two
+`Service` definitions in Kubernetes, one with the name `$clusterName-$nodeGroup`
+and another named `$clusterName-$nodeGroup-headless`.
+Only `Ready` pods are a part of the `$clusterName-$nodeGroup` service, while all
+pods (`Ready` or not) are a part of `$clusterName-$nodeGroup-headless`.
+
+If your group of master nodes has the default `nodeGroup: master` then you can
+just add new groups of nodes with a different `nodeGroup` and they will
+automatically discover the correct master. If your master nodes have a different
+`nodeGroup` name then you will need to set `masterService` to
+`$clusterName-$masterNodeGroup`.
+
+The chart value for `masterService` is used to populate
+`discovery.zen.ping.unicast.hosts`, which Elasticsearch nodes will use to
+contact master nodes and form a cluster.
+Therefore, to add a group of nodes to an existing cluster, setting
+`masterService` to the desired `Service` name of the related cluster is
+sufficient.
+
+### How to deploy clusters with security (authentication and TLS) enabled?
+
+This Helm chart can generate a [Kubernetes Secret][] or use an existing one to
+set up Elastic credentials.
+
+It can also use an existing [Kubernetes Secret][] to set up Elasticsearch
+certificates, for example. These secrets should be created outside of this
+chart and accessed using [environment variables][] and volumes.
+
+This chart enables TLS and creates a certificate by default, but you can also
+provide your own certs as a K8S secret. An example of configuration for
+providing existing certificates can be found in [examples/security][].
+
+### How to migrate from helm/charts stable chart?
+
+If you currently have a cluster deployed with the [helm/charts stable][] chart
+you can follow the [migration guide][].
+
+### How to install plugins?
+
+The recommended way to install plugins into our Docker images is to create a
+[custom Docker image][].
+
+The Dockerfile would look something like:
+
+```
+ARG elasticsearch_version
+FROM docker.elastic.co/elasticsearch/elasticsearch:${elasticsearch_version}
+
+RUN bin/elasticsearch-plugin install --batch repository-gcs
+```
+
+And then updating the `image` in values to point to your custom image.
+
+There are a couple of reasons we recommend this.
+
+1. Tying the availability of Elasticsearch to the download service to install
+plugins is not a great idea or something that we recommend. Especially in
+Kubernetes where it is normal and expected for a container to be moved to
+another host at random times.
+2. Mutating the state of a running Docker image (by installing plugins) goes
+against best practices of containers and immutable infrastructure.
+
+### How to use the keystore?
+
+#### Basic example
+
+Create the secret; the key name needs to be the keystore key path. In this
+example we will create a secret from a file and from a literal string.
+ +``` +kubectl create secret generic encryption-key --from-file=xpack.watcher.encryption_key=./watcher_encryption_key +kubectl create secret generic slack-hook --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' +``` + +To add these secrets to the keystore: + +``` +keystore: + - secretName: encryption-key + - secretName: slack-hook +``` + +#### Multiple keys + +All keys in the secret will be added to the keystore. To create the previous +example in one secret you could also do: + +``` +kubectl create secret generic keystore-secrets --from-file=xpack.watcher.encryption_key=./watcher_encryption_key --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' +``` + +``` +keystore: + - secretName: keystore-secrets +``` + +#### Custom paths and keys + +If you are using these secrets for other applications (besides the Elasticsearch +keystore) then it is also possible to specify the keystore path and which keys +you want to add. Everything specified under each `keystore` item will be passed +through to the `volumeMounts` section for mounting the [secret][]. In this +example we will only add the `slack_hook` key from a secret that also has other +keys. Our secret looks like this: + +``` +kubectl create secret generic slack-secrets --from-literal=slack_channel='#general' --from-literal=slack_hook='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' +``` + +We only want to add the `slack_hook` key to the keystore at path +`xpack.notification.slack.account.monitoring.secure_url`: + +``` +keystore: + - secretName: slack-secrets + items: + - key: slack_hook + path: xpack.notification.slack.account.monitoring.secure_url +``` + +You can also take a look at the [config example][] which is used as part of the +automated testing pipeline. + +### How to enable snapshotting? + +1. Install your [snapshot plugin][] into a custom Docker image following the +[how to install plugins guide][]. +2. Add any required secrets or credentials into an Elasticsearch keystore +following the [how to use the keystore][] guide. +3. Configure the [snapshot repository][] as you normally would. +4. To automate snapshots you can use [Snapshot Lifecycle Management][] or a tool +like [curator][]. + +### How to configure templates post-deployment? + +You can use `postStart` [lifecycle hooks][] to run code triggered after a +container is created. + +Here is an example of `postStart` hook to configure templates: + +```yaml +lifecycle: + postStart: + exec: + command: + - bash + - -c + - | + #!/bin/bash + # Add a template to adjust number of shards/replicas + TEMPLATE_NAME=my_template + INDEX_PATTERN="logstash-*" + SHARD_COUNT=8 + REPLICA_COUNT=1 + ES_URL=http://localhost:9200 + while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done + curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}' +``` + + +## Contributing + +Please check [CONTRIBUTING.md][] before any contribution or for any questions +about our development and testing process. 
+ +[7.x]: https://github.com/elastic/helm-charts/releases +[#1186 (comment)]: https://github.com/elastic/helm-charts/pull/1186#discussion_r631166442 +[BREAKING_CHANGES.md]: https://github.com/elastic/helm-charts/blob/main/BREAKING_CHANGES.md +[CHANGELOG.md]: https://github.com/elastic/helm-charts/blob/main/CHANGELOG.md +[CONTRIBUTING.md]: https://github.com/elastic/helm-charts/blob/main/CONTRIBUTING.md +[alternate scheduler]: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/#specify-schedulers-for-pods +[annotations]: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +[anti-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +[cluster.name]: https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.name.html +[clustering and node discovery]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/README.md#clustering-and-node-discovery +[config example]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/examples/config/values.yaml +[curator]: https://www.elastic.co/guide/en/elasticsearch/client/curator/current/snapshot.html +[custom docker image]: https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#_c_customized_image +[deploys statefulsets serially]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies +[discovery.zen.minimum_master_nodes]: https://www.elastic.co/guide/en/elasticsearch/reference/current/discovery-settings.html#minimum_master_nodes +[docker for mac]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/docker-for-mac +[elasticsearch cluster health status params]: https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params +[elasticsearch docker image]: https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html +[environment variables]: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#using-environment-variables-inside-of-your-config +[environment from variables]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables +[examples]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/ +[examples/multi]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi +[examples/security]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/security +[gke]: https://cloud.google.com/kubernetes-engine +[helm]: https://helm.sh +[helm/charts stable]: https://github.com/helm/charts/tree/master/stable/elasticsearch/ +[how to install plugins guide]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/README.md#how-to-install-plugins +[how to use the keystore]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/README.md#how-to-use-the-keystore +[http.port]: https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#_settings +[imagePullPolicy]: https://kubernetes.io/docs/concepts/containers/images/#updating-images +[imagePullSecrets]: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret +[ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/ +[java options]: https://www.elastic.co/guide/en/elasticsearch/reference/current/jvm-options.html +[jvm heap size]: 
https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html +[hostAliases]: https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +[kind]: https://github.com/elastic/helm-charts/tree/main//elasticsearch/examples/kubernetes-kind +[labels]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +[lifecycle hooks]: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/ +[loadBalancer annotations]: https://kubernetes.io/docs/concepts/services-networking/service/#ssl-support-on-aws +[loadBalancer externalTrafficPolicy]: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip +[loadBalancer]: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer +[maxUnavailable]: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget +[migration guide]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/examples/migration/README.md +[minikube]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/minikube +[microk8s]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/microk8s +[multi]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi/ +[network.host elasticsearch setting]: https://www.elastic.co/guide/en/elasticsearch/reference/current/network.host.html +[node affinity settings]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature +[nodePort]: https://kubernetes.io/docs/concepts/services-networking/service/#nodeport +[nodes types]: https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html +[nodeSelector]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +[openshift]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/openshift +[priorityClass]: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass +[probe]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ +[resources]: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ +[roles]: https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html +[secret]: https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets +[securityContext]: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +[service types]: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types +[snapshot lifecycle management]: https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-lifecycle-management.html +[snapshot plugin]: https://www.elastic.co/guide/en/elasticsearch/plugins/current/repository.html +[snapshot repository]: https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +[supported configurations]: https://github.com/elastic/helm-charts/blob/main/README.md#supported-configurations +[sysctl vm.max_map_count]: https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html#vm-max-map-count +[terminationGracePeriod]: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +[tolerations]: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +[transport port configuration]: 
https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-transport.html#_transport_settings
+[updateStrategy]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/
+[values.yaml]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/values.yaml
+[volumeClaimTemplate for statefulsets]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-storage
diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/config/Makefile b/ansible/roles/helm_install/files/elasticsearch/examples/config/Makefile
new file mode 100644
index 0000000..9ae9c37
--- /dev/null
+++ b/ansible/roles/helm_install/files/elasticsearch/examples/config/Makefile
@@ -0,0 +1,21 @@
+default: test
+
+include ../../../helpers/examples.mk
+
+RELEASE := helm-es-config
+TIMEOUT := 1200s
+
+install:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
+
+secrets:
+	kubectl delete secret elastic-config-credentials elastic-config-secret elastic-config-slack elastic-config-custom-path || true
+	kubectl create secret generic elastic-config-credentials --from-literal=password=changeme --from-literal=username=elastic
+	kubectl create secret generic elastic-config-slack --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
+	kubectl create secret generic elastic-config-secret --from-file=xpack.watcher.encryption_key=./watcher_encryption_key
+	kubectl create secret generic elastic-config-custom-path --from-literal=slack_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' --from-literal=thing_i_don_tcare_about=test
+
+test: secrets install goss
+
+purge:
+	helm del $(RELEASE)
diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/config/README.md b/ansible/roles/helm_install/files/elasticsearch/examples/config/README.md
new file mode 100644
index 0000000..e2ce8b1
--- /dev/null
+++ b/ansible/roles/helm_install/files/elasticsearch/examples/config/README.md
@@ -0,0 +1,27 @@
+# Config
+
+This example deploys a single-node Elasticsearch 8.4.1 cluster with
+authentication and custom [values][].
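+
+If you would rather create the secrets by hand than via `make secrets`, here is
+a minimal sketch mirroring the Makefile's `secrets` target above (the Slack URL
+is a placeholder):
+
+```
+kubectl create secret generic elastic-config-credentials \
+  --from-literal=username=elastic --from-literal=password=changeme
+kubectl create secret generic elastic-config-secret \
+  --from-file=xpack.watcher.encryption_key=./watcher_encryption_key
+kubectl create secret generic elastic-config-slack \
+  --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
+```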
+ + +## Usage + +* Create the required secrets: `make secrets` + +* Deploy Elasticsearch chart with the default values: `make install` + +* You can now setup a port forward to query Elasticsearch API: + + ``` + kubectl port-forward svc/config-master 9200 + curl -u elastic:changeme http://localhost:9200/_cat/indices + ``` + + +## Testing + +You can also run [goss integration tests][] using `make test` + + +[goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/config/test/goss.yaml +[values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/config/values.yaml diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/config/test/goss.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/config/test/goss.yaml new file mode 100644 index 0000000..b71ee37 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/config/test/goss.yaml @@ -0,0 +1,31 @@ +http: + https://localhost:9200/_cluster/health: + status: 200 + timeout: 2000 + allow-insecure: true + username: elastic + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - "green" + - '"number_of_nodes":1' + - '"number_of_data_nodes":1' + + https://localhost:9200: + status: 200 + timeout: 2000 + username: elastic + allow-insecure: true + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - '"cluster_name" : "config"' + - "You Know, for Search" + +command: + "elasticsearch-keystore list": + exit-status: 0 + stdout: + - keystore.seed + - bootstrap.password + - xpack.notification.slack.account.monitoring.secure_url + - xpack.notification.slack.account.otheraccount.secure_url + - xpack.watcher.encryption_key diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/config/values.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/config/values.yaml new file mode 100644 index 0000000..d90e0c8 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/config/values.yaml @@ -0,0 +1,29 @@ +--- +clusterName: "config" +replicas: 1 + +extraEnvs: + - name: ELASTIC_PASSWORD + valueFrom: + secretKeyRef: + name: elastic-config-credentials + key: password + +# This is just a dummy file to make sure that +# the keystore can be mounted at the same time +# as a custom elasticsearch.yml +esConfig: + elasticsearch.yml: | + xpack.security.enabled: true + path.data: /usr/share/elasticsearch/data + +keystore: + - secretName: elastic-config-secret + - secretName: elastic-config-slack + - secretName: elastic-config-custom-path + items: + - key: slack_url + path: xpack.notification.slack.account.otheraccount.secure_url + +secret: + enabled: false diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/config/watcher_encryption_key b/ansible/roles/helm_install/files/elasticsearch/examples/config/watcher_encryption_key new file mode 100644 index 0000000..b5f9078 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/config/watcher_encryption_key @@ -0,0 +1 @@ +supersecret diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/default/Makefile b/ansible/roles/helm_install/files/elasticsearch/examples/default/Makefile new file mode 100644 index 0000000..389bf99 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/default/Makefile @@ -0,0 +1,14 @@ +default: test + +include ../../../helpers/examples.mk + +RELEASE := helm-es-default +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install $(RELEASE) ../../ + +test: install goss + +purge: + helm 
del $(RELEASE) diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/default/README.md b/ansible/roles/helm_install/files/elasticsearch/examples/default/README.md new file mode 100644 index 0000000..23824c1 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/default/README.md @@ -0,0 +1,25 @@ +# Default + +This example deploy a 3 nodes Elasticsearch 8.4.1 cluster using +[default values][]. + + +## Usage + +* Deploy Elasticsearch chart with the default values: `make install` + +* You can now setup a port forward to query Elasticsearch API: + + ``` + kubectl port-forward svc/elasticsearch-master 9200 + curl localhost:9200/_cat/indices + ``` + + +## Testing + +You can also run [goss integration tests][] using `make test` + + +[goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/default/test/goss.yaml +[default values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/values.yaml diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/default/rolling_upgrade.sh b/ansible/roles/helm_install/files/elasticsearch/examples/default/rolling_upgrade.sh new file mode 100755 index 0000000..c5a2a88 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/default/rolling_upgrade.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash -x + +kubectl proxy || true & + +make & +PROC_ID=$! + +while kill -0 "$PROC_ID" >/dev/null 2>&1; do + echo "PROCESS IS RUNNING" + if curl --fail 'http://localhost:8001/api/v1/proxy/namespaces/default/services/elasticsearch-master:9200/_search' ; then + echo "cluster is healthy" + else + echo "cluster not healthy!" + exit 1 + fi + sleep 1 +done +echo "PROCESS TERMINATED" +exit 0 diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/default/test/goss.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/default/test/goss.yaml new file mode 100644 index 0000000..c6ff486 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/default/test/goss.yaml @@ -0,0 +1,44 @@ +kernel-param: + vm.max_map_count: + value: "262144" + +http: + https://elasticsearch-master:9200/_cluster/health: + status: 200 + timeout: 2000 + username: elastic + allow-insecure: true + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - "green" + - '"number_of_nodes":3' + - '"number_of_data_nodes":3' + + https://localhost:9200: + status: 200 + timeout: 2000 + allow-insecure: true + username: elastic + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - '"number" : "8.4.1"' + - '"cluster_name" : "elasticsearch"' + - "You Know, for Search" + +file: + /usr/share/elasticsearch/data: + exists: true + mode: "2775" + owner: root + group: elasticsearch + filetype: directory + +mount: + /usr/share/elasticsearch/data: + exists: true + +user: + elasticsearch: + exists: true + uid: 1000 + gid: 1000 diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/docker-for-mac/Makefile b/ansible/roles/helm_install/files/elasticsearch/examples/docker-for-mac/Makefile new file mode 100644 index 0000000..18fd053 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/docker-for-mac/Makefile @@ -0,0 +1,13 @@ +default: test + +RELEASE := helm-es-docker-for-mac +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +test: install + helm test $(RELEASE) + +purge: + helm del $(RELEASE) diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/docker-for-mac/README.md 
b/ansible/roles/helm_install/files/elasticsearch/examples/docker-for-mac/README.md
new file mode 100644
index 0000000..ddf07ad
--- /dev/null
+++ b/ansible/roles/helm_install/files/elasticsearch/examples/docker-for-mac/README.md
@@ -0,0 +1,23 @@
+# Docker for Mac
+
+This example deploys a 3-node Elasticsearch 8.4.1 cluster on [Docker for Mac][]
+using [custom values][].
+
+Note that this configuration should be used for testing only and isn't
+recommended for production.
+
+
+## Usage
+
+* Deploy Elasticsearch chart with the default values: `make install`
+
+* You can now set up a port forward to query the Elasticsearch API:
+
+  ```
+  kubectl port-forward svc/elasticsearch-master 9200
+  curl localhost:9200/_cat/indices
+  ```
+
+
+[custom values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/docker-for-mac/values.yaml
+[docker for mac]: https://docs.docker.com/docker-for-mac/kubernetes/
diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/docker-for-mac/values.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/docker-for-mac/values.yaml
new file mode 100644
index 0000000..f7deba6
--- /dev/null
+++ b/ansible/roles/helm_install/files/elasticsearch/examples/docker-for-mac/values.yaml
@@ -0,0 +1,23 @@
+---
+# Permit co-located instances for solitary minikube virtual machines.
+antiAffinity: "soft"
+
+# Shrink default JVM heap.
+esJavaOpts: "-Xmx128m -Xms128m"
+
+# Allocate smaller chunks of memory per pod.
+resources:
+  requests:
+    cpu: "100m"
+    memory: "512M"
+  limits:
+    cpu: "1000m"
+    memory: "512M"
+
+# Request smaller persistent volumes.
+volumeClaimTemplate:
+  accessModes: [ "ReadWriteOnce" ]
+  storageClassName: "hostpath"
+  resources:
+    requests:
+      storage: 100M
diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/Makefile b/ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/Makefile
new file mode 100644
index 0000000..9e5602d
--- /dev/null
+++ b/ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/Makefile
@@ -0,0 +1,17 @@
+default: test
+
+RELEASE := helm-es-kind
+TIMEOUT := 1200s
+
+install:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
+
+install-local-path:
+	kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values-local-path.yaml $(RELEASE) ../../
+
+test: install
+	helm test $(RELEASE)
+
+purge:
+	helm del $(RELEASE)
diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/README.md b/ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/README.md
new file mode 100644
index 0000000..4a9bdc6
--- /dev/null
+++ b/ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/README.md
@@ -0,0 +1,36 @@
+# KIND
+
+This example deploys a 3-node Elasticsearch 8.4.1 cluster on [Kind][]
+using [custom values][].
+
+Note that this configuration should be used for testing only and isn't
+recommended for production.
+
+Note that Kind versions before 0.7.0 are affected by a [kind issue][]: mount
+points created from PVCs are not writable by non-root users.
+[kubernetes-sigs/kind#1157][] fixed this in Kind 0.7.0.
+
+The workaround for Kind < 0.7.0 is to manually install the
+[Rancher Local Path Provisioner][] and use the `local-path` storage class for
+Elasticsearch volumes (see the [Makefile][] instructions and the sketch below).
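+
+For Kind < 0.7.0, a manual equivalent of the Makefile's `install-local-path`
+target above (with `TIMEOUT` and `RELEASE` expanded) is roughly:
+
+```
+kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
+helm upgrade --wait --timeout=1200s --install --values values-local-path.yaml helm-es-kind ../../
+```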
+ + +## Usage + +* For Kind >= 0.7.0: Deploy Elasticsearch chart with the default values: `make install` +* For Kind < 0.7.0: Deploy Elasticsearch chart with `local-path` storage class: `make install-local-path` + +* You can now setup a port forward to query Elasticsearch API: + + ``` + kubectl port-forward svc/elasticsearch-master 9200 + curl localhost:9200/_cat/indices + ``` + + +[custom values]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/examples/kubernetes-kind/values.yaml +[kind]: https://kind.sigs.k8s.io/ +[kind issue]: https://github.com/kubernetes-sigs/kind/issues/830 +[kubernetes-sigs/kind#1157]: https://github.com/kubernetes-sigs/kind/pull/1157 +[rancher local path provisioner]: https://github.com/rancher/local-path-provisioner +[Makefile]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/examples/kubernetes-kind/Makefile diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/values-local-path.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/values-local-path.yaml new file mode 100644 index 0000000..500ad4b --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/values-local-path.yaml @@ -0,0 +1,23 @@ +--- +# Permit co-located instances for solitary minikube virtual machines. +antiAffinity: "soft" + +# Shrink default JVM heap. +esJavaOpts: "-Xmx128m -Xms128m" + +# Allocate smaller chunks of memory per pod. +resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "512M" + +# Request smaller persistent volumes. +volumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "local-path" + resources: + requests: + storage: 100M diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/values.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/values.yaml new file mode 100644 index 0000000..500ad4b --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/kubernetes-kind/values.yaml @@ -0,0 +1,23 @@ +--- +# Permit co-located instances for solitary minikube virtual machines. +antiAffinity: "soft" + +# Shrink default JVM heap. +esJavaOpts: "-Xmx128m -Xms128m" + +# Allocate smaller chunks of memory per pod. +resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "512M" + +# Request smaller persistent volumes. 
+volumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "local-path" + resources: + requests: + storage: 100M diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/microk8s/Makefile b/ansible/roles/helm_install/files/elasticsearch/examples/microk8s/Makefile new file mode 100644 index 0000000..2d0012d --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/microk8s/Makefile @@ -0,0 +1,13 @@ +default: test + +RELEASE := helm-es-microk8s +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +test: install + helm test $(RELEASE) + +purge: + helm del $(RELEASE) diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/microk8s/README.md b/ansible/roles/helm_install/files/elasticsearch/examples/microk8s/README.md new file mode 100644 index 0000000..a717ba9 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/microk8s/README.md @@ -0,0 +1,32 @@ +# MicroK8S + +This example deploy a 3 nodes Elasticsearch 8.4.1 cluster on [MicroK8S][] +using [custom values][]. + +Note that this configuration should be used for test only and isn't recommended +for production. + + +## Requirements + +The following MicroK8S [addons][] need to be enabled: +- `dns` +- `helm` +- `storage` + + +## Usage + +* Deploy Elasticsearch chart with the default values: `make install` + +* You can now setup a port forward to query Elasticsearch API: + + ``` + kubectl port-forward svc/elasticsearch-master 9200 + curl localhost:9200/_cat/indices + ``` + + +[addons]: https://microk8s.io/docs/addons +[custom values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/microk8s/values.yaml +[MicroK8S]: https://microk8s.io diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/microk8s/values.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/microk8s/values.yaml new file mode 100644 index 0000000..2627ecb --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/microk8s/values.yaml @@ -0,0 +1,32 @@ +--- +# Disable privileged init Container creation. +sysctlInitContainer: + enabled: false + +# Restrict the use of the memory-mapping when sysctlInitContainer is disabled. +esConfig: + elasticsearch.yml: | + node.store.allow_mmap: false + +# Permit co-located instances for solitary minikube virtual machines. +antiAffinity: "soft" + +# Shrink default JVM heap. +esJavaOpts: "-Xmx128m -Xms128m" + +# Allocate smaller chunks of memory per pod. +resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "512M" + +# Request smaller persistent volumes. 
+volumeClaimTemplate:
+  accessModes: [ "ReadWriteOnce" ]
+  storageClassName: "microk8s-hostpath"
+  resources:
+    requests:
+      storage: 100M
diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/migration/Makefile b/ansible/roles/helm_install/files/elasticsearch/examples/migration/Makefile
new file mode 100644
index 0000000..020906f
--- /dev/null
+++ b/ansible/roles/helm_install/files/elasticsearch/examples/migration/Makefile
@@ -0,0 +1,10 @@
+PREFIX := helm-es-migration

+data:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values data.yaml $(PREFIX)-data ../../
+
+master:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values master.yaml $(PREFIX)-master ../../
+
+client:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values client.yaml $(PREFIX)-client ../../
diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/migration/README.md b/ansible/roles/helm_install/files/elasticsearch/examples/migration/README.md
new file mode 100644
index 0000000..8124dca
--- /dev/null
+++ b/ansible/roles/helm_install/files/elasticsearch/examples/migration/README.md
@@ -0,0 +1,167 @@
+# Migration Guide from helm/charts
+
+There are two viable options for migrating from the community Elasticsearch
+Helm chart in the [helm/charts][] repo:
+
+1. Restoring from a snapshot to a fresh cluster.
+2. Live migration by joining a new cluster to the existing cluster.
+
+## Restoring from Snapshot
+
+This is the recommended and preferred option. The downside is that it will
+involve a period of write downtime during the migration. If you have a way to
+temporarily stop writes to your cluster then this is the way to go. This is also
+a lot simpler as it just involves launching a fresh cluster and restoring a
+snapshot following the [restoring to a different cluster guide][].
+
+## Live migration
+
+If restoring from a snapshot is not possible due to the write downtime then a
+live migration is also possible. It is very important to first test this in a
+testing environment to make sure you are comfortable with the process and fully
+understand what is happening.
+
+This process will involve joining a new set of master, data and client nodes to
+an existing cluster that has been deployed using the [helm/charts][] community
+chart. Nodes will then be replaced one by one in a controlled fashion to
+decommission the old cluster.
+
+This example uses the default values for the existing helm/charts release and
+for the Elastic helm-charts release. If you have changed any of the default
+values then you will need to first make sure that your values are configured in
+a compatible way before starting the migration.
+
+The process will involve a re-sync and a rolling restart of all of your data
+nodes. Therefore it is important to disable shard allocation and perform a synced
+flush like you normally would during any other rolling upgrade. See the
+[rolling upgrades guide][] for more information.
+
+* The default image for this chart is
+`docker.elastic.co/elasticsearch/elasticsearch` which contains the default
+distribution of Elasticsearch with a [basic license][]. Make sure to update the
+`image` and `imageTag` values to the correct Docker image and Elasticsearch
+version that you currently have deployed.
+
+* Convert your current helm/charts configuration into something that is
+compatible with this chart.
+
+* Take a fresh snapshot of your cluster. If something goes wrong, you want to
+be able to restore your data no matter what.
+
+* Check that your cluster's health is green. If not, abort and make sure your
+cluster is healthy before continuing:
+
+  ```
+  curl localhost:9200/_cluster/health
+  ```
+
+* Deploy new data nodes which will join the existing cluster. Take a look at the
+configuration in [data.yaml][]:
+
+  ```
+  make data
+  ```
+
+* Check that the new nodes have joined the cluster (run this and any other curl
+commands from within one of your pods):
+
+  ```
+  curl localhost:9200/_cat/nodes
+  ```
+
+* Check that your cluster is still green. If so, you can now start to scale down
+the existing data nodes. Assuming you have the default number of data nodes (2),
+scale them down to 1:
+
+  ```
+  kubectl scale statefulsets my-release-elasticsearch-data --replicas=1
+  ```
+
+* Wait for your cluster to become green again (a reusable wait-for-green helper
+is sketched after this list):
+
+  ```
+  watch 'curl -s localhost:9200/_cluster/health'
+  ```
+
+* Once the cluster is green we can scale down again:
+
+  ```
+  kubectl scale statefulsets my-release-elasticsearch-data --replicas=0
+  ```
+
+* Wait for the cluster to be green again.
+* All data nodes are now running in the new cluster. Time to replace the
+masters, starting by scaling the old masters down from 3 to 2. Between each step
+make sure to wait for the cluster to become green again, and check with
+`curl localhost:9200/_cat/nodes` that you see the correct number of master
+nodes. During this process we will always keep at least 2 master nodes running
+so as not to lose quorum:
+
+  ```
+  kubectl scale statefulsets my-release-elasticsearch-master --replicas=2
+  ```
+
+* Now deploy a single new master so that we have 3 masters again. See
+[master.yaml][] for the configuration:
+
+  ```
+  make master
+  ```
+
+* Scale down old masters to 1:
+
+  ```
+  kubectl scale statefulsets my-release-elasticsearch-master --replicas=1
+  ```
+
+* Edit the replica count in [master.yaml][] to 2 and redeploy:
+
+  ```
+  make master
+  ```
+
+* Scale down the old masters to 0:
+
+  ```
+  kubectl scale statefulsets my-release-elasticsearch-master --replicas=0
+  ```
+
+* Edit [master.yaml][] to have 3 replicas, remove the
+`discovery.zen.ping.unicast.hosts` entry from `extraEnvs`, then redeploy the
+masters. This will make sure all 3 masters are running in the new cluster and
+are pointing at each other for discovery:
+
+  ```
+  make master
+  ```
+
+* Remove the `discovery.zen.ping.unicast.hosts` entry from `extraEnvs` then
+redeploy the data nodes to make sure they are pointing at the new masters:
+
+  ```
+  make data
+  ```
+
+* Deploy the client nodes:
+
+  ```
+  make client
+  ```
+
+* Update any processes that are talking to the existing client nodes and point
+them to the new client nodes. Once this is done you can scale down the old
+client nodes:
+
+  ```
+  kubectl scale deployment my-release-elasticsearch-client --replicas=0
+  ```
+
+* The migration should now be complete. After verifying that everything is
+working correctly you can clean up leftover resources from your old cluster.
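+
+Between each scaling step the cluster must return to green before you continue.
+As a minimal sketch of the wait-for-green helper mentioned above (run it from
+within one of the pods, like the other curl commands):
+
+```
+until curl -s localhost:9200/_cluster/health | grep -q '"status":"green"'; do
+  echo "cluster not green yet, waiting..."
+  sleep 5
+done
+```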
+ +[basic license]: https://www.elastic.co/subscriptions +[data.yaml]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/examples/migration/data.yaml +[helm/charts]: https://github.com/helm/charts/tree/master/stable/elasticsearch +[master.yaml]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/examples/migration/master.yaml +[restoring to a different cluster guide]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/modules-snapshots.html#_restoring_to_a_different_cluster +[rolling upgrades guide]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/rolling-upgrades.html diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/migration/client.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/migration/client.yaml new file mode 100644 index 0000000..8ac0641 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/migration/client.yaml @@ -0,0 +1,19 @@ +--- +replicas: 2 + +clusterName: "elasticsearch" +nodeGroup: "client" + +esMajorVersion: 6 + +roles: [] + +volumeClaimTemplate: + accessModes: ["ReadWriteOnce"] + storageClassName: "standard" + resources: + requests: + storage: 1Gi # Currently needed till pvcs are made optional + +persistence: + enabled: false diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/migration/data.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/migration/data.yaml new file mode 100644 index 0000000..012569d --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/migration/data.yaml @@ -0,0 +1,14 @@ +--- +replicas: 2 + +esMajorVersion: 6 + +extraEnvs: + - name: discovery.zen.ping.unicast.hosts + value: "my-release-elasticsearch-discovery" + +clusterName: "elasticsearch" +nodeGroup: "data" + +roles: + - data diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/migration/master.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/migration/master.yaml new file mode 100644 index 0000000..9f2f609 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/migration/master.yaml @@ -0,0 +1,23 @@ +--- +# Temporarily set to 3 so we can scale up/down the old a new cluster +# one at a time whilst always keeping 3 masters running +replicas: 1 + +esMajorVersion: 6 + +extraEnvs: + - name: discovery.zen.ping.unicast.hosts + value: "my-release-elasticsearch-discovery" + +clusterName: "elasticsearch" +nodeGroup: "master" + +roles: + - master + +volumeClaimTemplate: + accessModes: ["ReadWriteOnce"] + storageClassName: "standard" + resources: + requests: + storage: 4Gi diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/minikube/Makefile b/ansible/roles/helm_install/files/elasticsearch/examples/minikube/Makefile new file mode 100644 index 0000000..1021d98 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/minikube/Makefile @@ -0,0 +1,13 @@ +default: test + +RELEASE := helm-es-minikube +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +test: install + helm test $(RELEASE) + +purge: + helm del $(RELEASE) diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/minikube/README.md b/ansible/roles/helm_install/files/elasticsearch/examples/minikube/README.md new file mode 100644 index 0000000..6c84728 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/minikube/README.md @@ -0,0 +1,38 @@ +# Minikube + +This example deploy a 3 nodes Elasticsearch 8.4.1 cluster 
on [Minikube][] +using [custom values][]. + +If helm or kubectl timeouts occur, you may consider creating a minikube VM with +more CPU cores or memory allocated. + +Note that this configuration should be used for test only and isn't recommended +for production. + + +## Requirements + +In order to properly support the required persistent volume claims for the +Elasticsearch StatefulSet, the `default-storageclass` and `storage-provisioner` +minikube addons must be enabled. + +``` +minikube addons enable default-storageclass +minikube addons enable storage-provisioner +``` + + +## Usage + +* Deploy Elasticsearch chart with the default values: `make install` + +* You can now setup a port forward to query Elasticsearch API: + + ``` + kubectl port-forward svc/elasticsearch-master 9200 + curl localhost:9200/_cat/indices + ``` + + +[custom values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/minikube/values.yaml +[minikube]: https://minikube.sigs.k8s.io/docs/ diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/minikube/values.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/minikube/values.yaml new file mode 100644 index 0000000..ccceb3a --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/minikube/values.yaml @@ -0,0 +1,23 @@ +--- +# Permit co-located instances for solitary minikube virtual machines. +antiAffinity: "soft" + +# Shrink default JVM heap. +esJavaOpts: "-Xmx128m -Xms128m" + +# Allocate smaller chunks of memory per pod. +resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "512M" + +# Request smaller persistent volumes. +volumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 100M diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/multi/Makefile b/ansible/roles/helm_install/files/elasticsearch/examples/multi/Makefile new file mode 100644 index 0000000..243e504 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/multi/Makefile @@ -0,0 +1,19 @@ +default: test + +include ../../../helpers/examples.mk + +PREFIX := helm-es-multi +RELEASE := helm-es-multi-master +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values master.yaml $(PREFIX)-master ../../ + helm upgrade --wait --timeout=$(TIMEOUT) --install --values data.yaml $(PREFIX)-data ../../ + helm upgrade --wait --timeout=$(TIMEOUT) --install --values client.yaml $(PREFIX)-client ../../ + +test: install goss + +purge: + helm del $(PREFIX)-master + helm del $(PREFIX)-data + helm del $(PREFIX)-client diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/multi/README.md b/ansible/roles/helm_install/files/elasticsearch/examples/multi/README.md new file mode 100644 index 0000000..2347c70 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/multi/README.md @@ -0,0 +1,29 @@ +# Multi + +This example deploy an Elasticsearch 8.4.1 cluster composed of 3 different Helm +releases: + +- `helm-es-multi-master` for the 3 master nodes using [master values][] +- `helm-es-multi-data` for the 3 data nodes using [data values][] +- `helm-es-multi-client` for the 3 client nodes using [client values][] + +## Usage + +* Deploy the 3 Elasticsearch releases: `make install` + +* You can now setup a port forward to query Elasticsearch API: + + ``` + kubectl port-forward svc/multi-master 9200 + curl -u elastic:changeme http://localhost:9200/_cat/indices + ``` + +## Testing 
+ +You can also run [goss integration tests][] using `make test` + + +[client values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi/client.yaml +[data values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi/data.yaml +[goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi/test/goss.yaml +[master values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi/master.yaml diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/multi/client.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/multi/client.yaml new file mode 100644 index 0000000..2c05d1e --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/multi/client.yaml @@ -0,0 +1,50 @@ +--- +clusterName: "multi" +nodeGroup: "client" + +extraEnvs: + - name: ELASTIC_PASSWORD + valueFrom: + secretKeyRef: + name: multi-master-credentials + key: password + - name: xpack.security.enabled + value: "true" + - name: xpack.security.transport.ssl.enabled + value: "true" + - name: xpack.security.http.ssl.enabled + value: "true" + - name: xpack.security.transport.ssl.verification_mode + value: "certificate" + - name: xpack.security.transport.ssl.key + value: "/usr/share/elasticsearch/config/certs/tls.key" + - name: xpack.security.transport.ssl.certificate + value: "/usr/share/elasticsearch/config/certs/tls.crt" + - name: xpack.security.transport.ssl.certificate_authorities + value: "/usr/share/elasticsearch/config/certs/ca.crt" + - name: xpack.security.http.ssl.key + value: "/usr/share/elasticsearch/config/certs/tls.key" + - name: xpack.security.http.ssl.certificate + value: "/usr/share/elasticsearch/config/certs/tls.crt" + - name: xpack.security.http.ssl.certificate_authorities + value: "/usr/share/elasticsearch/config/certs/ca.crt" + +roles: [] + +persistence: + enabled: false + +# For client nodes, we also need to add an empty node.roles in elasticsearch.yml +# This is due to https://github.com/elastic/helm-charts/pull/1186#discussion_r631225687 +esConfig: + elasticsearch.yml: | + node.roles: [] + +secret: + enabled: false + +createCert: false +secretMounts: + - name: elastic-certificates + secretName: multi-master-certs + path: /usr/share/elasticsearch/config/certs diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/multi/data.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/multi/data.yaml new file mode 100644 index 0000000..cd453d3 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/multi/data.yaml @@ -0,0 +1,48 @@ +--- +clusterName: "multi" +nodeGroup: "data" + +extraEnvs: + - name: ELASTIC_PASSWORD + valueFrom: + secretKeyRef: + name: multi-master-credentials + key: password + - name: xpack.security.enabled + value: "true" + - name: xpack.security.transport.ssl.enabled + value: "true" + - name: xpack.security.http.ssl.enabled + value: "true" + - name: xpack.security.transport.ssl.verification_mode + value: "certificate" + - name: xpack.security.transport.ssl.key + value: "/usr/share/elasticsearch/config/certs/tls.key" + - name: xpack.security.transport.ssl.certificate + value: "/usr/share/elasticsearch/config/certs/tls.crt" + - name: xpack.security.transport.ssl.certificate_authorities + value: "/usr/share/elasticsearch/config/certs/ca.crt" + - name: xpack.security.http.ssl.key + value: "/usr/share/elasticsearch/config/certs/tls.key" + - name: xpack.security.http.ssl.certificate + value: 
"/usr/share/elasticsearch/config/certs/tls.crt" + - name: xpack.security.http.ssl.certificate_authorities + value: "/usr/share/elasticsearch/config/certs/ca.crt" + +roles: + - data + - data_content + - data_hot + - data_warm + - data_cold + - data_frozen + - ingest + +secret: + enabled: false + +createCert: false +secretMounts: + - name: elastic-certificates + secretName: multi-master-certs + path: /usr/share/elasticsearch/config/certs diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/multi/master.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/multi/master.yaml new file mode 100644 index 0000000..bb4ea30 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/multi/master.yaml @@ -0,0 +1,6 @@ +--- +clusterName: "multi" +nodeGroup: "master" + +roles: + - master diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/multi/test/goss.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/multi/test/goss.yaml new file mode 100644 index 0000000..c365388 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/multi/test/goss.yaml @@ -0,0 +1,12 @@ +http: + https://localhost:9200/_cluster/health: + status: 200 + timeout: 2000 + allow-insecure: true + username: elastic + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - "green" + - '"cluster_name":"multi"' + - '"number_of_nodes":9' + - '"number_of_data_nodes":3' diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/networkpolicy/Makefile b/ansible/roles/helm_install/files/elasticsearch/examples/networkpolicy/Makefile new file mode 100644 index 0000000..e7b20c5 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/networkpolicy/Makefile @@ -0,0 +1,14 @@ +default: test + +include ../../../helpers/examples.mk + +RELEASE := helm-es-networkpolicy +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +test: install goss + +purge: + helm del $(RELEASE) diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/networkpolicy/values.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/networkpolicy/values.yaml new file mode 100644 index 0000000..1963d20 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/networkpolicy/values.yaml @@ -0,0 +1,37 @@ +networkPolicy: + http: + enabled: true + explicitNamespacesSelector: + # Accept from namespaces with all those different rules (from whitelisted Pods) + matchLabels: + role: frontend-http + matchExpressions: + - {key: role, operator: In, values: [frontend-http]} + additionalRules: + - podSelector: + matchLabels: + role: frontend-http + - podSelector: + matchExpressions: + - key: role + operator: In + values: + - frontend-http + transport: + enabled: true + allowExternal: true + explicitNamespacesSelector: + matchLabels: + role: frontend-transport + matchExpressions: + - {key: role, operator: In, values: [frontend-transport]} + additionalRules: + - podSelector: + matchLabels: + role: frontend-transport + - podSelector: + matchExpressions: + - key: role + operator: In + values: + - frontend-transport diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/openshift/Makefile b/ansible/roles/helm_install/files/elasticsearch/examples/openshift/Makefile new file mode 100644 index 0000000..078c33c --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/openshift/Makefile @@ -0,0 +1,13 @@ +default: test + +include ../../../helpers/examples.mk + 
+RELEASE := elasticsearch + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +test: install goss + +purge: + helm del $(RELEASE) diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/openshift/README.md b/ansible/roles/helm_install/files/elasticsearch/examples/openshift/README.md new file mode 100644 index 0000000..d71093d --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/openshift/README.md @@ -0,0 +1,24 @@ +# OpenShift + +This example deploy a 3 nodes Elasticsearch 8.4.1 cluster on [OpenShift][] +using [custom values][]. + +## Usage + +* Deploy Elasticsearch chart with the default values: `make install` + +* You can now setup a port forward to query Elasticsearch API: + + ``` + kubectl port-forward svc/elasticsearch-master 9200 + curl localhost:9200/_cat/indices + ``` + +## Testing + +You can also run [goss integration tests][] using `make test` + + +[custom values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/openshift/values.yaml +[goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/openshift/test/goss.yaml +[openshift]: https://www.openshift.com/ diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/openshift/test/goss.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/openshift/test/goss.yaml new file mode 100644 index 0000000..0948fb5 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/openshift/test/goss.yaml @@ -0,0 +1,20 @@ +http: + https://localhost:9200/_cluster/health: + status: 200 + timeout: 2000 + username: elastic + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - "green" + - '"number_of_nodes":3' + - '"number_of_data_nodes":3' + + https://localhost:9200: + status: 200 + timeout: 2000 + username: elastic + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - '"number" : "8.4.1"' + - '"cluster_name" : "elasticsearch"' + - "You Know, for Search" diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/openshift/values.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/openshift/values.yaml new file mode 100644 index 0000000..8a21126 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/openshift/values.yaml @@ -0,0 +1,11 @@ +--- + +securityContext: + runAsUser: null + +podSecurityContext: + fsGroup: null + runAsUser: null + +sysctlInitContainer: + enabled: false diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/security/Makefile b/ansible/roles/helm_install/files/elasticsearch/examples/security/Makefile new file mode 100644 index 0000000..78726e6 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/security/Makefile @@ -0,0 +1,36 @@ +default: test + +include ../../../helpers/examples.mk + +RELEASE := helm-es-security +ELASTICSEARCH_IMAGE := docker.elastic.co/elasticsearch/elasticsearch:$(STACK_VERSION) +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +test: secrets install goss + +purge: + kubectl delete secrets elastic-certificates elastic-certificate-pem elastic-certificate-crt|| true + helm del $(RELEASE) + +pull-elasticsearch-image: + docker pull $(ELASTICSEARCH_IMAGE) + +secrets: + docker rm -f elastic-helm-charts-certs || true + rm -f elastic-certificates.p12 elastic-certificate.pem elastic-certificate.crt elastic-stack-ca.p12 || true + docker run --name elastic-helm-charts-certs -i -w /tmp 
\ + $(ELASTICSEARCH_IMAGE) \ + /bin/sh -c " \ + elasticsearch-certutil ca --out /tmp/elastic-stack-ca.p12 --pass '' && \ + elasticsearch-certutil cert --name security-master --dns security-master --ca /tmp/elastic-stack-ca.p12 --pass '' --ca-pass '' --out /tmp/elastic-certificates.p12" && \ + docker cp elastic-helm-charts-certs:/tmp/elastic-certificates.p12 ./ && \ + docker rm -f elastic-helm-charts-certs && \ + openssl pkcs12 -nodes -passin pass:'' -in elastic-certificates.p12 -out elastic-certificate.pem && \ + openssl x509 -outform der -in elastic-certificate.pem -out elastic-certificate.crt && \ + kubectl create secret generic elastic-certificates --from-file=elastic-certificates.p12 && \ + kubectl create secret generic elastic-certificate-pem --from-file=elastic-certificate.pem && \ + kubectl create secret generic elastic-certificate-crt --from-file=elastic-certificate.crt && \ + rm -f elastic-certificates.p12 elastic-certificate.pem elastic-certificate.crt elastic-stack-ca.p12 diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/security/README.md b/ansible/roles/helm_install/files/elasticsearch/examples/security/README.md new file mode 100644 index 0000000..012356f --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/security/README.md @@ -0,0 +1,29 @@ +# Security + +This example deploy a 3 nodes Elasticsearch 8.4.1 with authentication and +autogenerated certificates for TLS (see [values][]). + +Note that this configuration should be used for test only. For a production +deployment you should generate SSL certificates following the [official docs][]. + +## Usage + +* Create the required secrets: `make secrets` + +* Deploy Elasticsearch chart with the default values: `make install` + +* You can now setup a port forward to query Elasticsearch API: + + ``` + kubectl port-forward svc/security-master 9200 + curl -u elastic:changeme https://localhost:9200/_cat/indices + ``` + +## Testing + +You can also run [goss integration tests][] using `make test` + + +[goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/security/test/goss.yaml +[official docs]: https://www.elastic.co/guide/en/elasticsearch/reference/current/configuring-tls.html#node-certificates +[values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/security/values.yaml diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/security/test/goss.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/security/test/goss.yaml new file mode 100644 index 0000000..e35393f --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/security/test/goss.yaml @@ -0,0 +1,44 @@ +http: + https://security-master:9200/_cluster/health: + status: 200 + timeout: 2000 + allow-insecure: true + username: elastic + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - "green" + - '"number_of_nodes":3' + - '"number_of_data_nodes":3' + + https://localhost:9200/: + status: 200 + timeout: 2000 + allow-insecure: true + username: elastic + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - '"cluster_name" : "security"' + - "You Know, for Search" + + https://localhost:9200/_license: + status: 200 + timeout: 2000 + allow-insecure: true + username: elastic + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - "active" + - "basic" + +file: + /usr/share/elasticsearch/config/elasticsearch.yml: + exists: true + contains: + - "xpack.security.enabled: true" + - "xpack.security.transport.ssl.enabled: true" + - 
"xpack.security.transport.ssl.verification_mode: certificate" + - "xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12" + - "xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12" + - "xpack.security.http.ssl.enabled: true" + - "xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12" + - "xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12" diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/security/values.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/security/values.yaml new file mode 100644 index 0000000..e2b1c18 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/security/values.yaml @@ -0,0 +1,28 @@ +--- +clusterName: "security" +nodeGroup: "master" + +createCert: false + +roles: + - master + - ingest + - data + +protocol: https + +esConfig: + elasticsearch.yml: | + xpack.security.enabled: true + xpack.security.transport.ssl.enabled: true + xpack.security.transport.ssl.verification_mode: certificate + xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 + xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 + xpack.security.http.ssl.enabled: true + xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 + xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 + +secretMounts: + - name: elastic-certificates + secretName: elastic-certificates + path: /usr/share/elasticsearch/config/certs diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/upgrade/Makefile b/ansible/roles/helm_install/files/elasticsearch/examples/upgrade/Makefile new file mode 100644 index 0000000..0bfddab --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/upgrade/Makefile @@ -0,0 +1,19 @@ +default: test + +include ../../../helpers/examples.mk + +CHART := elasticsearch +RELEASE := helm-es-upgrade +FROM := 7.17.1 # upgrade from versions before 7.17.1 isn't compatible with 8.x + +install: + ../../../helpers/upgrade.sh --chart $(CHART) --release $(RELEASE) --from $(FROM) + # Rolling upgrade doesn't work when upgrading from clusters with security disabled. + # This is because nodes with security enabled can't join a cluster with security disabled. + # Every nodes need to be recreated at the same time so they can recreate a cluster with security enabled + kubectl delete pod --selector=app=upgrade-master + +test: install goss + +purge: + helm del $(RELEASE) diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/upgrade/README.md b/ansible/roles/helm_install/files/elasticsearch/examples/upgrade/README.md new file mode 100644 index 0000000..ab19df7 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/examples/upgrade/README.md @@ -0,0 +1,17 @@ +# Upgrade + +This example will deploy a 3 node Elasticsearch cluster chart using an old chart +version, then upgrade it. + + +## Usage + +* Deploy and upgrade Elasticsearch chart with the default values: `make install` + + +## Testing + +You can also run [goss integration tests][] using `make test`. 
+
+
+[goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/upgrade/test/goss.yaml
diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/upgrade/test/goss.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/upgrade/test/goss.yaml
new file mode 100644
index 0000000..8a3c0ef
--- /dev/null
+++ b/ansible/roles/helm_install/files/elasticsearch/examples/upgrade/test/goss.yaml
@@ -0,0 +1,22 @@
+http:
+  https://localhost:9200/_cluster/health:
+    status: 200
+    username: elastic
+    password: "{{ .Env.ELASTIC_PASSWORD }}"
+    allow-insecure: true
+    timeout: 2000
+    body:
+      - "green"
+      - '"number_of_nodes":3'
+      - '"number_of_data_nodes":3'
+
+  https://localhost:9200:
+    status: 200
+    username: elastic
+    password: "{{ .Env.ELASTIC_PASSWORD }}"
+    allow-insecure: true
+    timeout: 2000
+    body:
+      - '"number" : "8.4.1"'
+      - '"cluster_name" : "upgrade"'
+      - "You Know, for Search"
diff --git a/ansible/roles/helm_install/files/elasticsearch/examples/upgrade/values.yaml b/ansible/roles/helm_install/files/elasticsearch/examples/upgrade/values.yaml
new file mode 100644
index 0000000..461b100
--- /dev/null
+++ b/ansible/roles/helm_install/files/elasticsearch/examples/upgrade/values.yaml
@@ -0,0 +1,6 @@
+---
+clusterName: upgrade
+# Rolling upgrade doesn't work when upgrading from clusters with security disabled.
+# This is because nodes with security enabled can't join a cluster with security disabled.
+# Every node needs to be recreated at the same time so they can form a new cluster with security enabled
+updateStrategy: OnDelete
diff --git a/ansible/roles/helm_install/files/elasticsearch/override-values.yaml b/ansible/roles/helm_install/files/elasticsearch/override-values.yaml
new file mode 100644
index 0000000..f45a49b
--- /dev/null
+++ b/ansible/roles/helm_install/files/elasticsearch/override-values.yaml
@@ -0,0 +1,26 @@
+createCert: false
+esConfig:
+  elasticsearch.yml: |
+    xpack.security.enabled: false
+volumeClaimTemplate:
+  resources:
+    requests:
+      storage: 50Gi
+protocol: http
+service:
+  type: NodePort
+  # nodePort: ""
+tolerations:
+- key: "dev/data-es"
+  operator: "Exists"
+
+affinity:
+  nodeAffinity:
+    requiredDuringSchedulingIgnoredDuringExecution:
+      nodeSelectorTerms:
+      - matchExpressions:
+        - key: datasaker/group
+          operator: In
+          values:
+          - data-es
+
diff --git a/ansible/roles/helm_install/files/elasticsearch/schema/es-ddl.sh b/ansible/roles/helm_install/files/elasticsearch/schema/es-ddl.sh
new file mode 100755
index 0000000..9df354b
--- /dev/null
+++ b/ansible/roles/helm_install/files/elasticsearch/schema/es-ddl.sh
@@ -0,0 +1,310 @@
+#!/bin/bash
+
+#temp=$(kubectl get svc -n dsk-middle -o wide | grep elasticsearch | grep NodePort | awk {'print $5'})
+#export ES_NODEPORT=${temp:5:(-4)}
+ES_NODEPORT=`kubectl get svc -n dsk-middle |grep elasticsearch | grep NodePort | awk -F '9200:' {'print $2'} | awk -F '/' '{print $1}'`
+echo $ES_NODEPORT
+
+export MASTER_IP=$(kubectl get nodes -o wide | grep control-plane | awk {'print $6'} | head -1)
+echo $MASTER_IP
+
+export NUM_SHARDS=2
+export NUM_REPLICAS=1
+
+#SECURE=true
+SECURE=false
+
+if [ $SECURE = true ]
+then
+PARAM="-u elastic:elastic --insecure"
+PROTO="https"
+else
+PARAM=""
+PROTO="http"
+fi
+
+echo Secure=$SECURE
+echo Param=$PARAM
+echo Proto=$PROTO
+
+echo 'cat indices'
+echo "curl ${PARAM} -X GET ${PROTO}://${MASTER_IP}:${ES_NODEPORT}/_cat/indices"
+curl ${PARAM} -X GET ${PROTO}://${MASTER_IP}:${ES_NODEPORT}/_cat/indices
+
+
+######### Elasticsearch template definitions #########
+echo 
'ilm policy trace_span' +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/trace_span' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "3d", + "actions": { + "delete": {} + } + } + } + } +}' +echo '\n' + +echo 'template trace_span' +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/trace_span' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "trace_span-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "trace_span" + } + } + }, + "mappings": { + "properties": { + "tenantId": { + "type": "keyword" + }, + "userId": { + "type": "keyword" + }, + "hostKey": { + "type": "keyword" + }, + "agentKey": { + "type": "keyword" + }, + "agentKind": { + "type": "keyword" + }, + "agentVersion": { + "type": "keyword" + }, + "traceStreamID": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "traceID": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "spanID": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "operationName": { + "type": "keyword" + }, + "processID": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "processStreamID": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "references": { + "properties": { + "trace_id": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "span_id": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + } + } + }, + "startTime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "tags": { + "properties": { + "type": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "value": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "key": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "v_type": { + "type": "long" + } + } + }, + "logs": { + "properties": { + "fields": { + "properties": { + "v_str": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "key": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + } + } + }, + "timestamp": { + "type": "date" + } + } + } + } + } +}' +echo '\n' + +## log-message +echo 'ilm policy log-message' +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/log-message' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "3d", + "actions": { + "delete": {} + } + } + } + } +}' +echo '\n' + +echo 'template log-message' +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/log-message' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "log-message-*" + ], + "settings": { + "index": { + "lifecycle": { + "name": "log-message" + }, + "refresh_interval": "1s", + "number_of_shards": "2", + "number_of_replicas": "1" + } + }, + "mappings": { + "properties": { + "usr_app_kind": { + "type": "keyword" + }, + "dsk_usr_id": { + "type": "keyword" + }, 
+ "dsk_agent_name": { + "type": "keyword" + }, + "dsk_agent_key": { + "type": "keyword" + }, + "usr_tag": { + "type": "keyword" + }, + "dsk_host_key": { + "type": "keyword" + }, + "usr_app_name": { + "type": "keyword" + }, + "dsk_cluster_id": { + "type": "keyword" + }, + "message": { + "type": "text" + }, + "dsk_usr_tenant": { + "type": "keyword" + }, + "dsk_log_path": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "dsk_timestamp": { + "type": "long" + } + } + } +}' +echo '\n' diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/NOTES.txt b/ansible/roles/helm_install/files/elasticsearch/templates/NOTES.txt new file mode 100755 index 0000000..752526f --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/templates/NOTES.txt @@ -0,0 +1,8 @@ +1. Watch all cluster members come up. + $ kubectl get pods --namespace={{ .Release.Namespace }} -l app={{ template "elasticsearch.uname" . }} -w +2. Retrieve elastic user's password. + $ kubectl get secrets --namespace={{ .Release.Namespace }} {{ template "elasticsearch.uname" . }}-credentials -ojsonpath='{.data.password}' | base64 -d +{{- if .Values.tests.enabled }} +3. Test cluster health using Helm test. + $ helm --namespace={{ .Release.Namespace }} test {{ .Release.Name }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/_helpers.tpl b/ansible/roles/helm_install/files/elasticsearch/templates/_helpers.tpl new file mode 100644 index 0000000..b47e2fe --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/templates/_helpers.tpl @@ -0,0 +1,97 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "elasticsearch.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "elasticsearch.uname" -}} +{{- if empty .Values.fullnameOverride -}} +{{- if empty .Values.nameOverride -}} +{{ .Values.clusterName }}-{{ .Values.nodeGroup }} +{{- else -}} +{{ .Values.nameOverride }}-{{ .Values.nodeGroup }} +{{- end -}} +{{- else -}} +{{ .Values.fullnameOverride }} +{{- end -}} +{{- end -}} + +{{/* +Generate certificates when the secret doesn't exist +*/}} +{{- define "elasticsearch.gen-certs" -}} +{{- $certs := lookup "v1" "Secret" .Release.Namespace ( printf "%s-certs" (include "elasticsearch.uname" . ) ) -}} +{{- if $certs -}} +tls.crt: {{ index $certs.data "tls.crt" }} +tls.key: {{ index $certs.data "tls.key" }} +ca.crt: {{ index $certs.data "ca.crt" }} +{{- else -}} +{{- $altNames := list ( include "elasticsearch.masterService" . ) ( printf "%s.%s" (include "elasticsearch.masterService" .) .Release.Namespace ) ( printf "%s.%s.svc" (include "elasticsearch.masterService" .) .Release.Namespace ) -}} +{{- $ca := genCA "elasticsearch-ca" 365 -}} +{{- $cert := genSignedCert ( include "elasticsearch.masterService" . 
) nil $altNames 365 $ca -}} +tls.crt: {{ $cert.Cert | toString | b64enc }} +tls.key: {{ $cert.Key | toString | b64enc }} +ca.crt: {{ $ca.Cert | toString | b64enc }} +{{- end -}} +{{- end -}} + +{{- define "elasticsearch.masterService" -}} +{{- if empty .Values.masterService -}} +{{- if empty .Values.fullnameOverride -}} +{{- if empty .Values.nameOverride -}} +{{ .Values.clusterName }}-master +{{- else -}} +{{ .Values.nameOverride }}-master +{{- end -}} +{{- else -}} +{{ .Values.fullnameOverride }} +{{- end -}} +{{- else -}} +{{ .Values.masterService }} +{{- end -}} +{{- end -}} + +{{- define "elasticsearch.endpoints" -}} +{{- $replicas := int (toString (.Values.replicas)) }} +{{- $uname := (include "elasticsearch.uname" .) }} + {{- range $i, $e := untilStep 0 $replicas 1 -}} +{{ $uname }}-{{ $i }}, + {{- end -}} +{{- end -}} + +{{- define "elasticsearch.roles" -}} +{{- range $.Values.roles -}} +{{ . }}, +{{- end -}} +{{- end -}} + +{{- define "elasticsearch.esMajorVersion" -}} +{{- if .Values.esMajorVersion -}} +{{ .Values.esMajorVersion }} +{{- else -}} +{{- $version := int (index (.Values.imageTag | splitList ".") 0) -}} + {{- if and (contains "docker.elastic.co/elasticsearch/elasticsearch" .Values.image) (not (eq $version 0)) -}} +{{ $version }} + {{- else -}} +8 + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Use the fullname if the serviceAccount value is not set +*/}} +{{- define "elasticsearch.serviceAccount" -}} +{{- .Values.rbac.serviceAccountName | default (include "elasticsearch.uname" .) -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/configmap.yaml b/ansible/roles/helm_install/files/elasticsearch/templates/configmap.yaml new file mode 100644 index 0000000..fd1ad30 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/templates/configmap.yaml @@ -0,0 +1,34 @@ +{{- if .Values.esConfig }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "elasticsearch.uname" . }}-config + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +data: +{{- range $path, $config := .Values.esConfig }} + {{ $path }}: | +{{ $config | indent 4 -}} +{{- end -}} +{{- end -}} +{{- if .Values.esJvmOptions }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "elasticsearch.uname" . }}-jvm-options + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +data: +{{- range $path, $config := .Values.esJvmOptions }} + {{ $path }}: | +{{ $config | indent 4 -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/ingress.yaml b/ansible/roles/helm_install/files/elasticsearch/templates/ingress.yaml new file mode 100644 index 0000000..e60cebf --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/templates/ingress.yaml @@ -0,0 +1,64 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "elasticsearch.uname" . -}} +{{- $httpPort := .Values.httpPort -}} +{{- $pathtype := .Values.ingress.pathtype -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app: {{ .Chart.Name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . 
| indent 4 }}
+{{- end }}
+spec:
+  {{- if .Values.ingress.className }}
+  ingressClassName: {{ .Values.ingress.className | quote }}
+  {{- end }}
+{{- if .Values.ingress.tls }}
+  tls:
+  {{- if $ingressPath }}
+  {{- range .Values.ingress.tls }}
+  - hosts:
+    {{- range .hosts }}
+    - {{ . }}
+    {{- end }}
+    secretName: {{ .secretName }}
+  {{- end }}
+{{- else }}
+{{ toYaml .Values.ingress.tls | indent 4 }}
+  {{- end }}
+{{- end}}
+  rules:
+  {{- range .Values.ingress.hosts }}
+  {{- if $ingressPath }}
+  - host: {{ . }}
+    http:
+      paths:
+      - path: {{ $ingressPath }}
+        pathType: {{ $pathtype }}
+        backend:
+          service:
+            name: {{ $fullName }}
+            port:
+              number: {{ $httpPort }}
+  {{- else }}
+  - host: {{ .host }}
+    http:
+      paths:
+      {{- range .paths }}
+      - path: {{ .path }}
+        pathType: {{ $pathtype }}
+        backend:
+          service:
+            name: {{ $fullName }}
+            port:
+              number: {{ .servicePort | default $httpPort }}
+      {{- end }}
+  {{- end }}
+  {{- end }}
+  {{- end }}
diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/networkpolicy.yaml b/ansible/roles/helm_install/files/elasticsearch/templates/networkpolicy.yaml
new file mode 100644
index 0000000..62bb1bd
--- /dev/null
+++ b/ansible/roles/helm_install/files/elasticsearch/templates/networkpolicy.yaml
@@ -0,0 +1,72 @@
+{{- if (or .Values.networkPolicy.http.enabled .Values.networkPolicy.transport.enabled) }}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: {{ template "elasticsearch.uname" . }}
+  labels:
+    heritage: {{ .Release.Service | quote }}
+    release: {{ .Release.Name | quote }}
+    chart: "{{ .Chart.Name }}"
+    app: "{{ template "elasticsearch.uname" . }}"
+spec:
+  podSelector:
+    matchLabels:
+      app: "{{ template "elasticsearch.uname" . }}"
+  ingress: # Allow inbound connections
+
+{{- if .Values.networkPolicy.http.enabled }}
+  # For HTTP access
+  - ports:
+    - port: {{ .Values.httpPort }}
+    from:
+      # From authorized Pods (having the correct label)
+      - podSelector:
+          matchLabels:
+            {{ template "elasticsearch.uname" . }}-http-client: "true"
+{{- with .Values.networkPolicy.http.explicitNamespacesSelector }}
+        # From authorized namespaces
+        namespaceSelector:
+{{ toYaml . | indent 12 }}
+{{- end }}
+{{- with .Values.networkPolicy.http.additionalRules }}
+  # Or from custom additional rules
+{{ toYaml . | indent 8 }}
+{{- end }}
+{{- end }}
+
+{{- if .Values.networkPolicy.transport.enabled }}
+  # For transport access
+  - ports:
+    - port: {{ .Values.transportPort }}
+    from:
+      # From authorized Pods (having the correct label)
+      - podSelector:
+          matchLabels:
+            {{ template "elasticsearch.uname" . }}-transport-client: "true"
+{{- with .Values.networkPolicy.transport.explicitNamespacesSelector }}
+        # From authorized namespaces
+        namespaceSelector:
+{{ toYaml . | indent 12 }}
+{{- end }}
+{{- with .Values.networkPolicy.transport.additionalRules }}
+  # Or from custom additional rules
+{{ toYaml . | indent 8 }}
+{{- end }}
+      # Or from other Elasticsearch Pods
+      - podSelector:
+          matchLabels:
+            app: "{{ template "elasticsearch.uname" . }}"
+{{- end }}
+
+{{- end }}
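+
+{{/*
+Usage sketch (illustrative values, not chart defaults): with the block below,
+only pods carrying the matching client label may reach the HTTP port, e.g.
+"elasticsearch-master-http-client: true" when the release renders as
+elasticsearch-master:
+
+networkPolicy:
+  http:
+    enabled: true
+*/}}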
}}" +{{- end }} + +{{- end }} diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/poddisruptionbudget.yaml b/ansible/roles/helm_install/files/elasticsearch/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..6d0bdf3 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/templates/poddisruptionbudget.yaml @@ -0,0 +1,15 @@ +{{- if .Values.maxUnavailable }} +{{- if .Capabilities.APIVersions.Has "policy/v1" -}} +apiVersion: policy/v1 +{{- else}} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + name: "{{ template "elasticsearch.uname" . }}-pdb" +spec: + maxUnavailable: {{ .Values.maxUnavailable }} + selector: + matchLabels: + app: "{{ template "elasticsearch.uname" . }}" +{{- end }} diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/podsecuritypolicy.yaml b/ansible/roles/helm_install/files/elasticsearch/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..e22e75c --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/templates/podsecuritypolicy.yaml @@ -0,0 +1,18 @@ +{{- if .Values.podSecurityPolicy.create -}} +{{- $fullName := include "elasticsearch.uname" . -}} +{{- if .Capabilities.APIVersions.Has "policy/v1" -}} +apiVersion: policy/v1 +{{- else}} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodSecurityPolicy +metadata: + name: {{ default $fullName .Values.podSecurityPolicy.name | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +spec: +{{ toYaml .Values.podSecurityPolicy.spec | indent 2 }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/role.yaml b/ansible/roles/helm_install/files/elasticsearch/templates/role.yaml new file mode 100644 index 0000000..d3a7ee3 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/templates/role.yaml @@ -0,0 +1,25 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "elasticsearch.uname" . -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ $fullName | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +rules: + - apiGroups: + - extensions + resources: + - podsecuritypolicies + resourceNames: + {{- if eq .Values.podSecurityPolicy.name "" }} + - {{ $fullName | quote }} + {{- else }} + - {{ .Values.podSecurityPolicy.name | quote }} + {{- end }} + verbs: + - use +{{- end -}} diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/rolebinding.yaml b/ansible/roles/helm_install/files/elasticsearch/templates/rolebinding.yaml new file mode 100644 index 0000000..e0ecced --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/templates/rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "elasticsearch.uname" . -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ $fullName | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +subjects: + - kind: ServiceAccount + name: "{{ template "elasticsearch.serviceAccount" . 
}}" + namespace: {{ .Release.Namespace | quote }} +roleRef: + kind: Role + name: {{ $fullName | quote }} + apiGroup: rbac.authorization.k8s.io +{{- end -}} diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/secret-cert.yaml b/ansible/roles/helm_install/files/elasticsearch/templates/secret-cert.yaml new file mode 100644 index 0000000..97d8dec --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/templates/secret-cert.yaml @@ -0,0 +1,14 @@ +{{- if .Values.createCert }} +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: {{ template "elasticsearch.uname" . }}-certs + labels: + app: {{ template "elasticsearch.uname" . }} + chart: "{{ .Chart.Name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ ( include "elasticsearch.gen-certs" . ) | indent 2 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/secret.yaml b/ansible/roles/helm_install/files/elasticsearch/templates/secret.yaml new file mode 100644 index 0000000..cbdcbba --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/templates/secret.yaml @@ -0,0 +1,23 @@ +{{- if .Values.secret.enabled -}} +{{- $passwordValue := (randAlphaNum 16) | b64enc | quote }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "elasticsearch.uname" . }}-credentials + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} +type: Opaque +data: + username: {{ "elastic" | b64enc }} + {{- if .Values.secret.password }} + password: {{ .Values.secret.password | b64enc }} + {{- else }} + password: {{ $passwordValue }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/service.yaml b/ansible/roles/helm_install/files/elasticsearch/templates/service.yaml new file mode 100644 index 0000000..5fe52eb --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/templates/service.yaml @@ -0,0 +1,78 @@ +{{- if .Values.service.enabled -}} +--- +kind: Service +apiVersion: v1 +metadata: +{{- if eq .Values.nodeGroup "master" }} + name: {{ template "elasticsearch.masterService" . }} +{{- else }} + name: {{ template "elasticsearch.uname" . }} +{{- end }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4}} +{{- end }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +spec: + type: {{ .Values.service.type }} + selector: + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + publishNotReadyAddresses: {{ .Values.service.publishNotReadyAddresses }} + ports: + - name: {{ .Values.service.httpPortName | default "http" }} + protocol: TCP + port: {{ .Values.httpPort }} +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + - name: {{ .Values.service.transportPortName | default "transport" }} + protocol: TCP + port: {{ .Values.transportPort }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} +{{- end }} +{{- with .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml . 
| indent 4 }} +{{- end }} +{{- if .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} +{{- end }} +{{- end }} +--- +kind: Service +apiVersion: v1 +metadata: +{{- if eq .Values.nodeGroup "master" }} + name: {{ template "elasticsearch.masterService" . }}-headless +{{- else }} + name: {{ template "elasticsearch.uname" . }}-headless +{{- end }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +{{- if .Values.service.labelsHeadless }} +{{ toYaml .Values.service.labelsHeadless | indent 4 }} +{{- end }} + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve + # Create endpoints also if the related pod isn't ready + publishNotReadyAddresses: true + selector: + app: "{{ template "elasticsearch.uname" . }}" + ports: + - name: {{ .Values.service.httpPortName | default "http" }} + port: {{ .Values.httpPort }} + - name: {{ .Values.service.transportPortName | default "transport" }} + port: {{ .Values.transportPort }} diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/serviceaccount.yaml b/ansible/roles/helm_install/files/elasticsearch/templates/serviceaccount.yaml new file mode 100644 index 0000000..a7ef847 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "elasticsearch.uname" . -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "{{ template "elasticsearch.serviceAccount" . }}" + annotations: + {{- with .Values.rbac.serviceAccountAnnotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/statefulset.yaml b/ansible/roles/helm_install/files/elasticsearch/templates/statefulset.yaml new file mode 100644 index 0000000..64fd423 --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/templates/statefulset.yaml @@ -0,0 +1,427 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "elasticsearch.uname" . }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + annotations: + esMajorVersion: "{{ include "elasticsearch.esMajorVersion" . }}" +spec: + serviceName: {{ template "elasticsearch.uname" . }}-headless + selector: + matchLabels: + app: "{{ template "elasticsearch.uname" . }}" + replicas: {{ .Values.replicas }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: {{ .Values.updateStrategy }} + {{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: {{ template "elasticsearch.uname" . }} + {{- if .Values.persistence.labels.enabled }} + labels: + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . 
}}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- with .Values.persistence.annotations }} + annotations: +{{ toYaml . | indent 8 }} + {{- end }} + spec: +{{ toYaml .Values.volumeClaimTemplate | indent 6 }} + {{- end }} + template: + metadata: + name: "{{ template "elasticsearch.uname" . }}" + labels: + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{/* This forces a restart if the configmap has changed */}} + {{- if or .Values.esConfig .Values.esJvmOptions }} + configchecksum: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum | trunc 63 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} + {{- if .Values.fsGroup }} + fsGroup: {{ .Values.fsGroup }} # Deprecated value, please use .Values.podSecurityContext.fsGroup + {{- end }} + {{- if or .Values.rbac.create .Values.rbac.serviceAccountName }} + serviceAccountName: "{{ template "elasticsearch.serviceAccount" . }}" + {{- end }} + automountServiceAccountToken: {{ .Values.rbac.automountToken }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if or (eq .Values.antiAffinity "hard") (eq .Values.antiAffinity "soft") .Values.nodeAffinity }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + affinity: + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - "{{ template "elasticsearch.uname" .}}" + topologyKey: {{ .Values.antiAffinityTopologyKey }} + {{- else if eq .Values.antiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: {{ .Values.antiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - "{{ template "elasticsearch.uname" . }}" + {{- end }} + {{- with .Values.nodeAffinity }} + nodeAffinity: +{{ toYaml . | indent 10 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + volumes: + {{- range .Values.secretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- if .defaultMode }} + defaultMode: {{ .defaultMode }} + {{- end }} + {{- end }} + {{- if .Values.esConfig }} + - name: esconfig + configMap: + name: {{ template "elasticsearch.uname" . }}-config + {{- end }} + {{- if .Values.esJvmOptions }} + - name: esjvmoptions + configMap: + name: {{ template "elasticsearch.uname" . }}-jvm-options + {{- end }} + {{- if .Values.createCert }} + - name: elasticsearch-certs + secret: + secretName: {{ template "elasticsearch.uname" . }}-certs + {{- end }} +{{- if .Values.keystore }} + - name: keystore + emptyDir: {} + {{- range .Values.keystore }} + - name: keystore-{{ .secretName }} + secret: {{ toYaml . 
| nindent 12 }} + {{- end }} +{{ end }} + {{- if .Values.extraVolumes }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraVolumes) }} +{{ tpl .Values.extraVolumes . | indent 8 }} + {{- else }} +{{ toYaml .Values.extraVolumes | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + enableServiceLinks: {{ .Values.enableServiceLinks }} + {{- if .Values.hostAliases }} + hostAliases: {{ toYaml .Values.hostAliases | nindent 8 }} + {{- end }} + {{- if or (.Values.extraInitContainers) (.Values.sysctlInitContainer.enabled) (.Values.keystore) }} + initContainers: + {{- if .Values.sysctlInitContainer.enabled }} + - name: configure-sysctl + securityContext: + runAsUser: 0 + privileged: true + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + command: ["sysctl", "-w", "vm.max_map_count={{ .Values.sysctlVmMaxMapCount}}"] + resources: +{{ toYaml .Values.initResources | indent 10 }} + {{- end }} +{{ if .Values.keystore }} + - name: keystore + securityContext: +{{ toYaml .Values.securityContext | indent 10 }} + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + command: + - bash + - -c + - | + set -euo pipefail + + elasticsearch-keystore create + + for i in /tmp/keystoreSecrets/*/*; do + key=$(basename $i) + echo "Adding file $i to keystore key $key" + elasticsearch-keystore add-file "$key" "$i" + done + + # Add the bootstrap password since otherwise the Elasticsearch entrypoint tries to do this on startup + if [ ! -z ${ELASTIC_PASSWORD+x} ]; then + echo 'Adding env $ELASTIC_PASSWORD to keystore as key bootstrap.password' + echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x bootstrap.password + fi + + cp -a /usr/share/elasticsearch/config/elasticsearch.keystore /tmp/keystore/ + env: {{ toYaml .Values.extraEnvs | nindent 10 }} + envFrom: {{ toYaml .Values.envFrom | nindent 10 }} + resources: {{ toYaml .Values.initResources | nindent 10 }} + volumeMounts: + - name: keystore + mountPath: /tmp/keystore + {{- range .Values.keystore }} + - name: keystore-{{ .secretName }} + mountPath: /tmp/keystoreSecrets/{{ .secretName }} + {{- end }} +{{ end }} + {{- if .Values.extraInitContainers }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraInitContainers) }} +{{ tpl .Values.extraInitContainers . | indent 6 }} + {{- else }} +{{ toYaml .Values.extraInitContainers | indent 6 }} + {{- end }} + {{- end }} + {{- end }} + containers: + - name: "{{ template "elasticsearch.name" . 
}}" + securityContext: +{{ toYaml .Values.securityContext | indent 10 }} + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + readinessProbe: + exec: + command: + - bash + - -c + - | + set -e + + # Exit if ELASTIC_PASSWORD in unset + if [ -z "${ELASTIC_PASSWORD}" ]; then + echo "ELASTIC_PASSWORD variable is missing, exiting" + exit 1 + fi + + # If the node is starting up wait for the cluster to be ready (request params: "{{ .Values.clusterHealthCheckParams }}" ) + # Once it has started only check that the node itself is responding + START_FILE=/tmp/.es_start_file + + # Disable nss cache to avoid filling dentry cache when calling curl + # This is required with Elasticsearch Docker using nss < 3.52 + export NSS_SDB_USE_CACHE=no + + http () { + local path="${1}" + local args="${2}" + set -- -XGET -s + + if [ "$args" != "" ]; then + set -- "$@" $args + fi + + set -- "$@" -u "elastic:${ELASTIC_PASSWORD}" + + curl --output /dev/null -k "$@" "{{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}${path}" + } + + if [ -f "${START_FILE}" ]; then + echo 'Elasticsearch is already running, lets check the node is healthy' + HTTP_CODE=$(http "/" "-w %{http_code}") + RC=$? + if [[ ${RC} -ne 0 ]]; then + echo "curl --output /dev/null -k -XGET -s -w '%{http_code}' \${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/ failed with RC ${RC}" + exit ${RC} + fi + # ready if HTTP code 200, 503 is tolerable if ES version is 6.x + if [[ ${HTTP_CODE} == "200" ]]; then + exit 0 + elif [[ ${HTTP_CODE} == "503" && "{{ include "elasticsearch.esMajorVersion" . }}" == "6" ]]; then + exit 0 + else + echo "curl --output /dev/null -k -XGET -s -w '%{http_code}' \${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/ failed with HTTP code ${HTTP_CODE}" + exit 1 + fi + + else + echo 'Waiting for elasticsearch cluster to become ready (request params: "{{ .Values.clusterHealthCheckParams }}" )' + if http "/_cluster/health?{{ .Values.clusterHealthCheckParams }}" "--fail" ; then + touch ${START_FILE} + exit 0 + else + echo 'Cluster is not yet ready (request params: "{{ .Values.clusterHealthCheckParams }}" )' + exit 1 + fi + fi +{{ toYaml .Values.readinessProbe | indent 10 }} + ports: + - name: http + containerPort: {{ .Values.httpPort }} + - name: transport + containerPort: {{ .Values.transportPort }} + resources: +{{ toYaml .Values.resources | indent 10 }} + env: + - name: node.name + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if has "master" .Values.roles }} + - name: cluster.initial_master_nodes + value: "{{ template "elasticsearch.endpoints" . }}" + {{- end }} + {{- if gt (len (include "elasticsearch.roles" .)) 0 }} + - name: node.roles + value: "{{ template "elasticsearch.roles" . }}" + {{- end }} + {{- if lt (int (include "elasticsearch.esMajorVersion" .)) 7 }} + - name: discovery.zen.ping.unicast.hosts + value: "{{ template "elasticsearch.masterService" . }}-headless" + {{- else }} + - name: discovery.seed_hosts + value: "{{ template "elasticsearch.masterService" . }}-headless" + {{- end }} + - name: cluster.name + value: "{{ .Values.clusterName }}" + - name: network.host + value: "{{ .Values.networkHost }}" + {{- if .Values.secret.enabled }} + - name: ELASTIC_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "elasticsearch.uname" . 
}}-credentials + key: password + {{- end }} + {{- if .Values.esJavaOpts }} + - name: ES_JAVA_OPTS + value: "{{ .Values.esJavaOpts }}" + {{- end }} + {{- if .Values.createCert }} + - name: xpack.security.enabled + value: "true" + - name: xpack.security.transport.ssl.enabled + value: "true" + - name: xpack.security.http.ssl.enabled + value: "true" + - name: xpack.security.transport.ssl.verification_mode + value: "certificate" + - name: xpack.security.transport.ssl.key + value: "/usr/share/elasticsearch/config/certs/tls.key" + - name: xpack.security.transport.ssl.certificate + value: "/usr/share/elasticsearch/config/certs/tls.crt" + - name: xpack.security.transport.ssl.certificate_authorities + value: "/usr/share/elasticsearch/config/certs/ca.crt" + - name: xpack.security.http.ssl.key + value: "/usr/share/elasticsearch/config/certs/tls.key" + - name: xpack.security.http.ssl.certificate + value: "/usr/share/elasticsearch/config/certs/tls.crt" + - name: xpack.security.http.ssl.certificate_authorities + value: "/usr/share/elasticsearch/config/certs/ca.crt" + {{- end }} +{{- if .Values.extraEnvs }} +{{ toYaml .Values.extraEnvs | indent 10 }} +{{- end }} +{{- if .Values.envFrom }} + envFrom: +{{ toYaml .Values.envFrom | indent 10 }} +{{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: "{{ template "elasticsearch.uname" . }}" + mountPath: /usr/share/elasticsearch/data + {{- end }} + {{- if .Values.createCert }} + - name: elasticsearch-certs + mountPath: /usr/share/elasticsearch/config/certs + readOnly: true + {{- end }} +{{ if .Values.keystore }} + - name: keystore + mountPath: /usr/share/elasticsearch/config/elasticsearch.keystore + subPath: elasticsearch.keystore +{{ end }} + {{- range .Values.secretMounts }} + - name: {{ .name }} + mountPath: {{ .path }} + {{- if .subPath }} + subPath: {{ .subPath }} + {{- end }} + {{- end }} + {{- range $path, $config := .Values.esConfig }} + - name: esconfig + mountPath: /usr/share/elasticsearch/config/{{ $path }} + subPath: {{ $path }} + {{- end -}} + {{- range $path, $config := .Values.esJvmOptions }} + - name: esjvmoptions + mountPath: /usr/share/elasticsearch/config/jvm.options.d/{{ $path }} + subPath: {{ $path }} + {{- end -}} + {{- if .Values.extraVolumeMounts }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraVolumeMounts) }} +{{ tpl .Values.extraVolumeMounts . | indent 10 }} + {{- else }} +{{ toYaml .Values.extraVolumeMounts | indent 10 }} + {{- end }} + {{- end }} +{{- if .Values.lifecycle }} + lifecycle: +{{ toYaml .Values.lifecycle | indent 10 }} +{{- end }} + {{- if .Values.extraContainers }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraContainers) }} +{{ tpl .Values.extraContainers . 
| indent 6 }} + {{- else }} +{{ toYaml .Values.extraContainers | indent 6 }} + {{- end }} + {{- end }} diff --git a/ansible/roles/helm_install/files/elasticsearch/templates/test/test-elasticsearch-health.yaml b/ansible/roles/helm_install/files/elasticsearch/templates/test/test-elasticsearch-health.yaml new file mode 100644 index 0000000..d0890fb --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/templates/test/test-elasticsearch-health.yaml @@ -0,0 +1,50 @@ +{{- if .Values.tests.enabled -}} +--- +apiVersion: v1 +kind: Pod +metadata: +{{- if .Values.healthNameOverride }} + name: {{ .Values.healthNameOverride | quote }} +{{- else }} + name: "{{ .Release.Name }}-{{ randAlpha 5 | lower }}-test" +{{- end }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-delete-policy": hook-succeeded +spec: + securityContext: +{{ toYaml .Values.podSecurityContext | indent 4 }} + containers: +{{- if .Values.healthNameOverride }} + - name: {{ .Values.healthNameOverride | quote }} +{{- else }} + - name: "{{ .Release.Name }}-{{ randAlpha 5 | lower }}-test" +{{- end }} + env: + - name: ELASTIC_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "elasticsearch.uname" . }}-credentials + key: password + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + command: + - "sh" + - "-c" + - | + #!/usr/bin/env bash -e + curl -XGET --fail --cacert /usr/share/elasticsearch/config/certs/tls.crt -u "elastic:${ELASTIC_PASSWORD}" https://'{{ template "elasticsearch.uname" . }}:{{ .Values.httpPort }}/_cluster/health?{{ .Values.clusterHealthCheckParams }}' + volumeMounts: + - name: elasticsearch-certs + mountPath: /usr/share/elasticsearch/config/certs + readOnly: true + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 4 }} + {{- end }} + restartPolicy: Never + volumes: + - name: elasticsearch-certs + secret: + secretName: {{ template "elasticsearch.uname" . 
}}-certs +{{- end -}} diff --git a/ansible/roles/helm_install/files/elasticsearch/tests/elasticsearch_test.py b/ansible/roles/helm_install/files/elasticsearch/tests/elasticsearch_test.py new file mode 100644 index 0000000..00e9abd --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/tests/elasticsearch_test.py @@ -0,0 +1,1504 @@ +import os +import sys + +sys.path.insert(1, os.path.join(sys.path[0], "../../helpers")) +from helpers import helm_template +import yaml + +clusterName = "elasticsearch" +nodeGroup = "master" +uname = clusterName + "-" + nodeGroup + + +def test_defaults(): + config = """ + """ + + r = helm_template(config) + + # Statefulset + assert r["statefulset"][uname]["spec"]["replicas"] == 3 + assert r["statefulset"][uname]["spec"]["updateStrategy"] == { + "type": "RollingUpdate" + } + assert r["statefulset"][uname]["spec"]["podManagementPolicy"] == "Parallel" + assert r["statefulset"][uname]["spec"]["serviceName"] == uname + "-headless" + assert r["statefulset"][uname]["spec"]["template"]["spec"]["affinity"][ + "podAntiAffinity" + ]["requiredDuringSchedulingIgnoredDuringExecution"][0] == { + "labelSelector": { + "matchExpressions": [{"key": "app", "operator": "In", "values": [uname]}] + }, + "topologyKey": "kubernetes.io/hostname", + } + + # Default environment variables + env_vars = [ + { + "name": "node.name", + "valueFrom": {"fieldRef": {"fieldPath": "metadata.name"}}, + }, + { + "name": "cluster.initial_master_nodes", + "value": uname + "-0," + uname + "-1," + uname + "-2,", + }, + {"name": "discovery.seed_hosts", "value": uname + "-headless"}, + {"name": "network.host", "value": "0.0.0.0"}, + {"name": "cluster.name", "value": clusterName}, + { + "name": "node.roles", + "value": "master,data,data_content,data_hot,data_warm,data_cold,ingest,ml,remote_cluster_client,transform,", + }, + ] + + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + for env in env_vars: + assert env in c["env"] + + # Image + assert c["image"].startswith("docker.elastic.co/elasticsearch/elasticsearch:") + assert c["imagePullPolicy"] == "IfNotPresent" + assert c["name"] == "elasticsearch" + + # Ports + assert c["ports"][0] == {"name": "http", "containerPort": 9200} + assert c["ports"][1] == {"name": "transport", "containerPort": 9300} + + # Health checks + assert c["readinessProbe"]["failureThreshold"] == 3 + assert c["readinessProbe"]["initialDelaySeconds"] == 10 + assert c["readinessProbe"]["periodSeconds"] == 10 + assert c["readinessProbe"]["successThreshold"] == 3 + assert c["readinessProbe"]["timeoutSeconds"] == 5 + + assert "curl" in c["readinessProbe"]["exec"]["command"][-1] + assert "https://127.0.0.1:9200" in c["readinessProbe"]["exec"]["command"][-1] + + # Resources + assert c["resources"] == { + "requests": {"cpu": "1000m", "memory": "2Gi"}, + "limits": {"cpu": "1000m", "memory": "2Gi"}, + } + + # Mounts + assert c["volumeMounts"][0]["mountPath"] == "/usr/share/elasticsearch/data" + assert c["volumeMounts"][0]["name"] == uname + + # volumeClaimTemplates + v = r["statefulset"][uname]["spec"]["volumeClaimTemplates"][0] + assert v["metadata"]["name"] == uname + assert "labels" not in v["metadata"] + assert v["spec"]["accessModes"] == ["ReadWriteOnce"] + assert v["spec"]["resources"]["requests"]["storage"] == "30Gi" + + # Init container + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][0] + assert i["name"] == "configure-sysctl" + assert i["command"] == ["sysctl", "-w", "vm.max_map_count=262144"] + assert 
i["image"].startswith("docker.elastic.co/elasticsearch/elasticsearch:") + assert i["securityContext"] == {"privileged": True, "runAsUser": 0} + + # Other + assert r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"] == { + "fsGroup": 1000, + "runAsUser": 1000, + } + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"][ + "terminationGracePeriodSeconds" + ] + == 120 + ) + + # Pod disruption budget + assert r["poddisruptionbudget"][uname + "-pdb"]["spec"]["maxUnavailable"] == 1 + + # Service + s = r["service"][uname] + assert s["metadata"]["name"] == uname + assert s["metadata"]["annotations"] == {} + assert s["spec"]["type"] == "ClusterIP" + assert s["spec"]["publishNotReadyAddresses"] == False + assert len(s["spec"]["ports"]) == 2 + assert s["spec"]["ports"][0] == {"name": "http", "port": 9200, "protocol": "TCP"} + assert s["spec"]["ports"][1] == { + "name": "transport", + "port": 9300, + "protocol": "TCP", + } + assert "loadBalancerSourceRanges" not in s["spec"] + + # Headless Service + h = r["service"][uname + "-headless"] + assert h["spec"]["clusterIP"] == "None" + assert h["spec"]["publishNotReadyAddresses"] == True + assert h["spec"]["ports"][0]["name"] == "http" + assert h["spec"]["ports"][0]["port"] == 9200 + assert h["spec"]["ports"][1]["name"] == "transport" + assert h["spec"]["ports"][1]["port"] == 9300 + + # Empty customizable defaults + assert "imagePullSecrets" not in r["statefulset"][uname]["spec"]["template"]["spec"] + assert "tolerations" not in r["statefulset"][uname]["spec"]["template"]["spec"] + assert "nodeSelector" not in r["statefulset"][uname]["spec"]["template"]["spec"] + assert "ingress" not in r + assert "hostAliases" not in r["statefulset"][uname]["spec"]["template"]["spec"] + + +def test_increasing_the_replicas(): + config = """ +replicas: 5 +""" + r = helm_template(config) + assert r["statefulset"][uname]["spec"]["replicas"] == 5 + + +def test_disabling_pod_disruption_budget(): + config = """ +maxUnavailable: false +""" + r = helm_template(config) + assert "poddisruptionbudget" not in r + + +def test_overriding_the_image_and_tag(): + config = """ +image: customImage +imageTag: 6.2.4 +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0]["image"] + == "customImage:6.2.4" + ) + + +def test_set_initial_master_nodes(): + config = """ +roles: + - master +""" + r = helm_template(config) + env = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0]["env"] + assert { + "name": "cluster.initial_master_nodes", + "value": "elasticsearch-master-0," + + "elasticsearch-master-1," + + "elasticsearch-master-2,", + } in env + + for e in env: + assert e["name"] != "discovery.zen.minimum_master_nodes" + + +def test_dont_set_initial_master_nodes_if_not_master(): + config = """ +roles: + - data +""" + r = helm_template(config) + env = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0]["env"] + for e in env: + assert e["name"] != "cluster.initial_master_nodes" + + +def test_set_discovery_seed_host(): + config = """ +roles: + - master +""" + r = helm_template(config) + env = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0]["env"] + assert { + "name": "discovery.seed_hosts", + "value": "elasticsearch-master-headless", + } in env + + for e in env: + assert e["name"] != "discovery.zen.ping.unicast.hosts" + + +def test_adding_extra_env_vars(): + config = """ +extraEnvs: + - name: hello + value: world +""" + r = helm_template(config) + env = 
r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0]["env"] + assert {"name": "hello", "value": "world"} in env + + +def test_adding_env_from(): + config = """ +envFrom: +- secretRef: + name: secret-name +""" + r = helm_template(config) + secretRef = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0][ + "envFrom" + ][0]["secretRef"] + assert secretRef == {"name": "secret-name"} + + +def test_adding_a_extra_volume_with_volume_mount(): + config = """ +extraVolumes: | + - name: extras + emptyDir: {} +extraVolumeMounts: | + - name: extras + mountPath: /usr/share/extras + readOnly: true +""" + r = helm_template(config) + extraVolume = r["statefulset"][uname]["spec"]["template"]["spec"]["volumes"] + assert {"name": "extras", "emptyDir": {}} in extraVolume + extraVolumeMounts = r["statefulset"][uname]["spec"]["template"]["spec"][ + "containers" + ][0]["volumeMounts"] + assert { + "name": "extras", + "mountPath": "/usr/share/extras", + "readOnly": True, + } in extraVolumeMounts + + +def test_adding_a_extra_volume_with_volume_mount_as_yaml(): + config = """ +extraVolumes: + - name: extras + emptyDir: {} +extraVolumeMounts: + - name: extras + mountPath: /usr/share/extras + readOnly: true +""" + r = helm_template(config) + extraVolume = r["statefulset"][uname]["spec"]["template"]["spec"]["volumes"] + assert {"name": "extras", "emptyDir": {}} in extraVolume + extraVolumeMounts = r["statefulset"][uname]["spec"]["template"]["spec"][ + "containers" + ][0]["volumeMounts"] + assert { + "name": "extras", + "mountPath": "/usr/share/extras", + "readOnly": True, + } in extraVolumeMounts + + +def test_adding_a_extra_container(): + config = """ +extraContainers: | + - name: do-something + image: busybox + command: ['do', 'something'] +""" + r = helm_template(config) + extraContainer = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"] + assert { + "name": "do-something", + "image": "busybox", + "command": ["do", "something"], + } in extraContainer + + +def test_adding_a_extra_container_as_yaml(): + config = """ +extraContainers: + - name: do-something + image: busybox + command: ['do', 'something'] +""" + r = helm_template(config) + extraContainer = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"] + assert { + "name": "do-something", + "image": "busybox", + "command": ["do", "something"], + } in extraContainer + + +def test_adding_a_extra_init_container(): + config = """ +extraInitContainers: | + - name: do-something + image: busybox + command: ['do', 'something'] +""" + r = helm_template(config) + extraInitContainer = r["statefulset"][uname]["spec"]["template"]["spec"][ + "initContainers" + ] + assert { + "name": "do-something", + "image": "busybox", + "command": ["do", "something"], + } in extraInitContainer + + +def test_adding_a_extra_init_container_as_yaml(): + config = """ +extraInitContainers: + - name: do-something + image: busybox + command: ['do', 'something'] +""" + r = helm_template(config) + extraInitContainer = r["statefulset"][uname]["spec"]["template"]["spec"][ + "initContainers" + ] + assert { + "name": "do-something", + "image": "busybox", + "command": ["do", "something"], + } in extraInitContainer + + +def test_sysctl_init_container_disabled(): + config = """ +sysctlInitContainer: + enabled: false +""" + r = helm_template(config) + assert "initContainers" not in r["statefulset"][uname]["spec"]["template"]["spec"] + + +def test_sysctl_init_container_enabled(): + config = """ +sysctlInitContainer: + enabled: true +""" + r = 
helm_template(config) + initContainers = r["statefulset"][uname]["spec"]["template"]["spec"][ + "initContainers" + ] + assert initContainers[0]["name"] == "configure-sysctl" + + +def test_sysctl_init_container_image(): + config = """ +image: customImage +imageTag: 6.2.4 +imagePullPolicy: Never +sysctlInitContainer: + enabled: true +""" + r = helm_template(config) + initContainers = r["statefulset"][uname]["spec"]["template"]["spec"][ + "initContainers" + ] + assert initContainers[0]["image"] == "customImage:6.2.4" + assert initContainers[0]["imagePullPolicy"] == "Never" + + +def test_adding_storageclass_annotation_to_volumeclaimtemplate(): + config = """ +persistence: + annotations: + volume.beta.kubernetes.io/storage-class: id +""" + r = helm_template(config) + annotations = r["statefulset"][uname]["spec"]["volumeClaimTemplates"][0][ + "metadata" + ]["annotations"] + assert annotations["volume.beta.kubernetes.io/storage-class"] == "id" + + +def test_adding_multiple_persistence_annotations(): + config = """ + persistence: + annotations: + hello: world + world: hello + """ + r = helm_template(config) + annotations = r["statefulset"][uname]["spec"]["volumeClaimTemplates"][0][ + "metadata" + ]["annotations"] + + assert annotations["hello"] == "world" + assert annotations["world"] == "hello" + + +def test_enabling_persistence_label_in_volumeclaimtemplate(): + config = """ +persistence: + labels: + enabled: true +""" + r = helm_template(config) + volume_claim_template_labels = r["statefulset"][uname]["spec"][ + "volumeClaimTemplates" + ][0]["metadata"]["labels"] + statefulset_labels = r["statefulset"][uname]["metadata"]["labels"] + expected_labels = statefulset_labels + # heritage label shouldn't be present in volumeClaimTemplates labels + expected_labels.pop("heritage") + assert volume_claim_template_labels == expected_labels + + +def test_adding_a_secret_mount(): + config = """ +secretMounts: + - name: elastic-certificates + secretName: elastic-certs + path: /usr/share/elasticsearch/config/certs +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["containers"][0]["volumeMounts"][-1] == { + "mountPath": "/usr/share/elasticsearch/config/certs", + "name": "elastic-certificates", + } + assert { + "name": "elastic-certificates", + "secret": {"secretName": "elastic-certs"}, + } in s["volumes"] + + +def test_adding_a_secret_mount_with_subpath(): + config = """ +secretMounts: + - name: elastic-certificates + secretName: elastic-certs + path: /usr/share/elasticsearch/config/certs + subPath: cert.crt +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["containers"][0]["volumeMounts"][-1] == { + "mountPath": "/usr/share/elasticsearch/config/certs", + "subPath": "cert.crt", + "name": "elastic-certificates", + } + + +def test_adding_a_secret_mount_with_default_mode(): + config = """ +secretMounts: + - name: elastic-certificates + secretName: elastic-certs + path: /usr/share/elasticsearch/config/certs + subPath: cert.crt + defaultMode: 0755 +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["containers"][0]["volumeMounts"][-1] == { + "mountPath": "/usr/share/elasticsearch/config/certs", + "subPath": "cert.crt", + "name": "elastic-certificates", + } + + +def test_adding_image_pull_secrets(): + config = """ +imagePullSecrets: + - name: test-registry +""" + r = helm_template(config) + assert ( + 
r["statefulset"][uname]["spec"]["template"]["spec"]["imagePullSecrets"][0][ + "name" + ] + == "test-registry" + ) + + +def test_adding_tolerations(): + config = """ +tolerations: +- key: "key1" + operator: "Equal" + value: "value1" + effect: "NoExecute" + tolerationSeconds: 3600 +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["tolerations"][0]["key"] + == "key1" + ) + + +def test_adding_pod_annotations(): + config = """ +podAnnotations: + iam.amazonaws.com/role: es-role +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["metadata"]["annotations"][ + "iam.amazonaws.com/role" + ] + == "es-role" + ) + + +def test_adding_serviceaccount_annotations(): + config = """ +rbac: + create: true + serviceAccountAnnotations: + eks.amazonaws.com/role-arn: arn:aws:iam::111111111111:role/k8s.clustername.namespace.serviceaccount +""" + r = helm_template(config) + assert ( + r["serviceaccount"][uname]["metadata"]["annotations"][ + "eks.amazonaws.com/role-arn" + ] + == "arn:aws:iam::111111111111:role/k8s.clustername.namespace.serviceaccount" + ) + + +def test_adding_a_node_selector(): + config = """ +nodeSelector: + disktype: ssd +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["nodeSelector"]["disktype"] + == "ssd" + ) + + +def test_adding_resources_to_initcontainer(): + config = """ +initResources: + limits: + cpu: "25m" + memory: "128Mi" + requests: + cpu: "25m" + memory: "128Mi" +""" + r = helm_template(config) + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][0] + + assert i["resources"] == { + "requests": {"cpu": "25m", "memory": "128Mi"}, + "limits": {"cpu": "25m", "memory": "128Mi"}, + } + + +def test_adding_a_node_affinity(): + config = """ +nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: mylabel + operator: In + values: + - myvalue +""" + r = helm_template(config) + assert r["statefulset"][uname]["spec"]["template"]["spec"]["affinity"][ + "nodeAffinity" + ] == { + "preferredDuringSchedulingIgnoredDuringExecution": [ + { + "weight": 100, + "preference": { + "matchExpressions": [ + {"key": "mylabel", "operator": "In", "values": ["myvalue"]} + ] + }, + } + ] + } + + +def test_adding_an_ingress_rule(): + config = """ +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx + hosts: + - host: elasticsearch.elastic.co + paths: + - path: / + - host: '' + paths: + - path: / + - path: /mypath + servicePort: 8888 + - host: elasticsearch.hello.there + paths: + - path: / + servicePort: 9999 + tls: + - secretName: elastic-co-wildcard + hosts: + - elasticsearch.elastic.co +""" + + r = helm_template(config) + assert uname in r["ingress"] + i = r["ingress"][uname]["spec"] + assert i["tls"][0]["hosts"][0] == "elasticsearch.elastic.co" + assert i["tls"][0]["secretName"] == "elastic-co-wildcard" + + assert i["rules"][0]["host"] == "elasticsearch.elastic.co" + assert i["rules"][0]["http"]["paths"][0]["path"] == "/" + assert i["rules"][0]["http"]["paths"][0]["backend"]["service"]["name"] == uname + assert ( + i["rules"][0]["http"]["paths"][0]["backend"]["service"]["port"]["number"] + == 9200 + ) + assert i["rules"][1]["host"] == None + assert i["rules"][1]["http"]["paths"][0]["path"] == "/" + assert i["rules"][1]["http"]["paths"][0]["backend"]["service"]["name"] == uname + assert ( + i["rules"][1]["http"]["paths"][0]["backend"]["service"]["port"]["number"] + == 
9200 + ) + assert i["rules"][1]["http"]["paths"][1]["path"] == "/mypath" + assert i["rules"][1]["http"]["paths"][1]["backend"]["service"]["name"] == uname + assert ( + i["rules"][1]["http"]["paths"][1]["backend"]["service"]["port"]["number"] + == 8888 + ) + assert i["rules"][2]["host"] == "elasticsearch.hello.there" + assert i["rules"][2]["http"]["paths"][0]["path"] == "/" + assert i["rules"][2]["http"]["paths"][0]["backend"]["service"]["name"] == uname + assert ( + i["rules"][2]["http"]["paths"][0]["backend"]["service"]["port"]["number"] + == 9999 + ) + + +def test_adding_a_deprecated_ingress_rule(): + config = """ +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx + path: / + hosts: + - elasticsearch.elastic.co + tls: + - secretName: elastic-co-wildcard + hosts: + - elasticsearch.elastic.co +""" + + r = helm_template(config) + assert uname in r["ingress"] + i = r["ingress"][uname]["spec"] + assert i["tls"][0]["hosts"][0] == "elasticsearch.elastic.co" + assert i["tls"][0]["secretName"] == "elastic-co-wildcard" + + assert i["rules"][0]["host"] == "elasticsearch.elastic.co" + assert i["rules"][0]["http"]["paths"][0]["path"] == "/" + assert i["rules"][0]["http"]["paths"][0]["backend"]["service"]["name"] == uname + assert ( + i["rules"][0]["http"]["paths"][0]["backend"]["service"]["port"]["number"] + == 9200 + ) + + +def test_changing_the_protocol(): + config = """ +protocol: https +""" + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert "https://127.0.0.1:9200" in c["readinessProbe"]["exec"]["command"][-1] + + +def test_changing_the_cluster_health_status(): + config = """ +clusterHealthCheckParams: 'wait_for_no_initializing_shards=true&timeout=60s' +""" + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert ( + "/_cluster/health?wait_for_no_initializing_shards=true&timeout=60s" + in c["readinessProbe"]["exec"]["command"][-1] + ) + + +def test_adding_in_es_config(): + config = """ +esConfig: + elasticsearch.yml: | + key: + nestedkey: value + dot.notation: test + + log4j2.properties: | + appender.rolling.name = rolling +""" + r = helm_template(config) + c = r["configmap"][uname + "-config"]["data"] + + assert "elasticsearch.yml" in c + assert "log4j2.properties" in c + + assert "nestedkey: value" in c["elasticsearch.yml"] + assert "dot.notation: test" in c["elasticsearch.yml"] + + assert "appender.rolling.name = rolling" in c["log4j2.properties"] + + s = r["statefulset"][uname]["spec"]["template"]["spec"] + + assert { + "configMap": {"name": "elasticsearch-master-config"}, + "name": "esconfig", + } in s["volumes"] + assert { + "mountPath": "/usr/share/elasticsearch/config/elasticsearch.yml", + "name": "esconfig", + "subPath": "elasticsearch.yml", + } in s["containers"][0]["volumeMounts"] + assert { + "mountPath": "/usr/share/elasticsearch/config/log4j2.properties", + "name": "esconfig", + "subPath": "log4j2.properties", + } in s["containers"][0]["volumeMounts"] + + assert ( + "configchecksum" + in r["statefulset"][uname]["spec"]["template"]["metadata"]["annotations"] + ) + + +def test_adding_in_jvm_options(): + config = """ +esJvmOptions: + processors.options: | + -XX:ActiveProcessorCount=3 +""" + r = helm_template(config) + c = r["configmap"][uname + "-jvm-options"]["data"] + + assert "processors.options" in c + + assert "-XX:ActiveProcessorCount=3" in c["processors.options"] + + s = r["statefulset"][uname]["spec"]["template"]["spec"] + + assert { + 
"configMap": {"name": "elasticsearch-master-jvm-options"}, + "name": "esjvmoptions", + } in s["volumes"] + assert { + "mountPath": "/usr/share/elasticsearch/config/jvm.options.d/processors.options", + "name": "esjvmoptions", + "subPath": "processors.options", + } in s["containers"][0]["volumeMounts"] + + assert ( + "configchecksum" + in r["statefulset"][uname]["spec"]["template"]["metadata"]["annotations"] + ) + + +def test_dont_add_data_volume_when_persistance_is_disabled(): + config = """ +persistence: + enabled: false +""" + r = helm_template(config) + assert "volumeClaimTemplates" not in r["statefulset"][uname]["spec"] + assert { + "name": "elasticsearch-master", + "mountPath": "/usr/share/elasticsearch/data", + } not in r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0][ + "volumeMounts" + ] + + +def test_priority_class_name(): + config = """ +priorityClassName: "" +""" + r = helm_template(config) + spec = r["statefulset"][uname]["spec"]["template"]["spec"] + assert "priorityClassName" not in spec + + config = """ +priorityClassName: "highest" +""" + r = helm_template(config) + priority_class_name = r["statefulset"][uname]["spec"]["template"]["spec"][ + "priorityClassName" + ] + assert priority_class_name == "highest" + + +def test_scheduler_name(): + r = helm_template("") + spec = r["statefulset"][uname]["spec"]["template"]["spec"] + assert "schedulerName" not in spec + + config = """ +schedulerName: "stork" +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["schedulerName"] == "stork" + ) + + +def test_disabling_non_headless_service(): + config = "" + + r = helm_template(config) + + assert uname in r["service"] + + config = """ +service: + enabled: false +""" + + r = helm_template(config) + + assert uname not in r["service"] + + +def test_enabling_service_publishNotReadyAddresses(): + config = """ + service: + publishNotReadyAddresses: true + """ + + r = helm_template(config) + + assert r["service"][uname]["spec"]["publishNotReadyAddresses"] == True + + +def test_adding_a_nodePort(): + config = "" + + r = helm_template(config) + + assert "nodePort" not in r["service"][uname]["spec"]["ports"][0] + + config = """ + service: + nodePort: 30001 + """ + + r = helm_template(config) + + assert r["service"][uname]["spec"]["ports"][0]["nodePort"] == 30001 + + +def test_adding_a_loadBalancerIP(): + config = "" + + r = helm_template(config) + + assert "loadBalancerIP" not in r["service"][uname]["spec"] + + config = """ + service: + loadBalancerIP: 12.4.19.81 + """ + + r = helm_template(config) + + assert r["service"][uname]["spec"]["loadBalancerIP"] == "12.4.19.81" + + +def test_adding_an_externalTrafficPolicy(): + config = "" + + r = helm_template(config) + + assert "externalTrafficPolicy" not in r["service"][uname]["spec"] + + config = """ + service: + externalTrafficPolicy: Local + """ + + r = helm_template(config) + + assert r["service"][uname]["spec"]["externalTrafficPolicy"] == "Local" + + +def test_adding_a_label_on_non_headless_service(): + config = "" + + r = helm_template(config) + + assert "label1" not in r["service"][uname]["metadata"]["labels"] + + config = """ + service: + labels: + label1: value1 + """ + + r = helm_template(config) + + assert r["service"][uname]["metadata"]["labels"]["label1"] == "value1" + + +def test_adding_a_label_on_headless_service(): + config = "" + + r = helm_template(config) + + assert "label1" not in r["service"][uname + "-headless"]["metadata"]["labels"] + + config = """ + service: + 
labelsHeadless: + label1: value1 + """ + + r = helm_template(config) + + assert r["service"][uname + "-headless"]["metadata"]["labels"]["label1"] == "value1" + + +def test_adding_load_balancer_source_ranges(): + config = """ +service: + loadBalancerSourceRanges: + - 0.0.0.0/0 + """ + r = helm_template(config) + assert r["service"][uname]["spec"]["loadBalancerSourceRanges"][0] == "0.0.0.0/0" + + config = """ +service: + loadBalancerSourceRanges: + - 192.168.0.0/24 + - 192.168.1.0/24 + """ + r = helm_template(config) + ranges = r["service"][uname]["spec"]["loadBalancerSourceRanges"] + assert ranges[0] == "192.168.0.0/24" + assert ranges[1] == "192.168.1.0/24" + + +def test_lifecycle_hooks(): + config = "" + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert "lifecycle" not in c + + config = """ + lifecycle: + preStop: + exec: + command: ["/bin/bash","/preStop"] + """ + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + + assert c["lifecycle"]["preStop"]["exec"]["command"] == ["/bin/bash", "/preStop"] + + +def test_esMajorVersion_detect_default_version(): + config = "" + + r = helm_template(config) + assert r["statefulset"][uname]["metadata"]["annotations"]["esMajorVersion"] == "8" + + +def test_esMajorVersion_default_to_8_if_not_elastic_image(): + config = """ + image: notElastic + imageTag: 1.0.0 + """ + + r = helm_template(config) + assert r["statefulset"][uname]["metadata"]["annotations"]["esMajorVersion"] == "8" + + +def test_esMajorVersion_default_to_8_if_no_version_is_found(): + config = """ + imageTag: not_a_number + """ + + r = helm_template(config) + assert r["statefulset"][uname]["metadata"]["annotations"]["esMajorVersion"] == "8" + + +def test_esMajorVersion_always_wins(): + config = """ + esMajorVersion: 7 + imageTag: 8.0.0 + """ + + r = helm_template(config) + assert r["statefulset"][uname]["metadata"]["annotations"]["esMajorVersion"] == "7" + + +def test_set_pod_security_context(): + config = "" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"][ + "fsGroup" + ] + == 1000 + ) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"][ + "runAsUser" + ] + == 1000 + ) + + config = """ + podSecurityContext: + fsGroup: 1001 + other: test + """ + + r = helm_template(config) + + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"][ + "fsGroup" + ] + == 1001 + ) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"]["other"] + == "test" + ) + + +def test_fsGroup_backwards_compatability(): + config = """ + fsGroup: 1001 + """ + + r = helm_template(config) + + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"][ + "fsGroup" + ] + == 1001 + ) + + +def test_set_container_security_context(): + config = "" + + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert c["securityContext"]["capabilities"]["drop"] == ["ALL"] + assert c["securityContext"]["runAsNonRoot"] == True + assert c["securityContext"]["runAsUser"] == 1000 + + config = """ + securityContext: + runAsUser: 1001 + other: test + """ + + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert c["securityContext"]["capabilities"]["drop"] == ["ALL"] + assert c["securityContext"]["runAsNonRoot"] == True + assert c["securityContext"]["runAsUser"] 
== 1001 + assert c["securityContext"]["other"] == "test" + + +def test_adding_pod_labels(): + config = """ +labels: + app.kubernetes.io/name: elasticsearch +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["metadata"]["labels"]["app.kubernetes.io/name"] + == "elasticsearch" + ) + assert ( + r["statefulset"][uname]["spec"]["template"]["metadata"]["labels"][ + "app.kubernetes.io/name" + ] + == "elasticsearch" + ) + + +def test_keystore_enable(): + config = """ +keystore: + - secretName: test + """ + + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + + assert {"name": "keystore", "emptyDir": {}} in s["volumes"] + + +def test_keystore_init_container(): + config = "" + + r = helm_template(config) + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][-1] + + assert i["name"] != "keystore" + + config = """ +keystore: + - secretName: test + """ + + r = helm_template(config) + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][-1] + + assert i["name"] == "keystore" + + +def test_keystore_init_container_image(): + config = """ +image: customImage +imageTag: 6.2.4 +imagePullPolicy: Never +keystore: + - secretName: test +""" + r = helm_template(config) + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][-1] + assert i["image"] == "customImage:6.2.4" + assert i["imagePullPolicy"] == "Never" + + +def test_keystore_mount(): + config = """ +keystore: + - secretName: test +""" + + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["containers"][0]["volumeMounts"][-1] == { + "mountPath": "/usr/share/elasticsearch/config/elasticsearch.keystore", + "subPath": "elasticsearch.keystore", + "name": "keystore", + } + + +def test_keystore_init_volume_mounts(): + config = """ +keystore: + - secretName: test + - secretName: test-with-custom-path + items: + - key: slack_url + path: xpack.notification.slack.account.otheraccount.secure_url +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["initContainers"][-1]["volumeMounts"] == [ + {"mountPath": "/tmp/keystore", "name": "keystore"}, + {"mountPath": "/tmp/keystoreSecrets/test", "name": "keystore-test"}, + { + "mountPath": "/tmp/keystoreSecrets/test-with-custom-path", + "name": "keystore-test-with-custom-path", + }, + ] + + +def test_keystore_volumes(): + config = """ +keystore: + - secretName: test + - secretName: test-with-custom-path + items: + - key: slack_url + path: xpack.notification.slack.account.otheraccount.secure_url +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + + assert {"name": "keystore-test", "secret": {"secretName": "test"}} in s["volumes"] + + assert { + "name": "keystore-test-with-custom-path", + "secret": { + "secretName": "test-with-custom-path", + "items": [ + { + "key": "slack_url", + "path": "xpack.notification.slack.account.otheraccount.secure_url", + } + ], + }, + } in s["volumes"] + + +def test_pod_security_policy(): + ## Make sure the default config is not creating any resources + config = "" + resources = ("role", "rolebinding", "serviceaccount", "podsecuritypolicy") + r = helm_template(config) + for resource in resources: + assert resource not in r + assert ( + "serviceAccountName" not in r["statefulset"][uname]["spec"]["template"]["spec"] + ) + + ## Make sure all the resources are created with default values + config = """ +rbac: + create: true + serviceAccountName: "" + 
+podSecurityPolicy: + create: true + name: "" +""" + r = helm_template(config) + for resource in resources: + assert resource in r + assert r["role"][uname]["rules"][0] == { + "apiGroups": ["extensions"], + "verbs": ["use"], + "resources": ["podsecuritypolicies"], + "resourceNames": [uname], + } + assert r["rolebinding"][uname]["subjects"] == [ + {"kind": "ServiceAccount", "namespace": "default", "name": uname} + ] + assert r["rolebinding"][uname]["roleRef"] == { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "Role", + "name": uname, + } + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["serviceAccountName"] + == uname + ) + psp_spec = r["podsecuritypolicy"][uname]["spec"] + assert psp_spec["privileged"] is True + + +def test_external_pod_security_policy(): + ## Make sure we can use an externally defined pod security policy + config = """ +rbac: + create: true + serviceAccountName: "" + +podSecurityPolicy: + create: false + name: "customPodSecurityPolicy" +""" + resources = ("role", "rolebinding") + r = helm_template(config) + for resource in resources: + assert resource in r + + assert r["role"][uname]["rules"][0] == { + "apiGroups": ["extensions"], + "verbs": ["use"], + "resources": ["podsecuritypolicies"], + "resourceNames": ["customPodSecurityPolicy"], + } + + +def test_external_service_account(): + ## Make sure we can use an externally defined service account + config = """ +rbac: + create: false + serviceAccountName: "customServiceAccountName" + +podSecurityPolicy: + create: false + name: "" +""" + resources = ("role", "rolebinding", "serviceaccount") + r = helm_template(config) + + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["serviceAccountName"] + == "customServiceAccountName" + ) + # When referencing an external service account we do not want any resources to be created. 
+ for resource in resources: + assert resource not in r + + +def test_name_override(): + ## Make sure we can use a name override + config = """ +nameOverride: "customName" +""" + r = helm_template(config) + + assert "customName-master" in r["statefulset"] + assert "customName-master" in r["service"] + + +def test_full_name_override(): + ## Make sure we can use a full name override + config = """ +fullnameOverride: "customfullName" +""" + r = helm_template(config) + + assert "customfullName" in r["statefulset"] + assert "customfullName" in r["service"] + + +def test_initial_master_nodes_when_using_full_name_override(): + config = """ +fullnameOverride: "customfullName" +""" + r = helm_template(config) + env = r["statefulset"]["customfullName"]["spec"]["template"]["spec"]["containers"][ + 0 + ]["env"] + assert { + "name": "cluster.initial_master_nodes", + "value": "customfullName-0," + "customfullName-1," + "customfullName-2,", + } in env + + +def test_hostaliases(): + config = """ +hostAliases: +- ip: "127.0.0.1" + hostnames: + - "foo.local" + - "bar.local" +""" + r = helm_template(config) + hostAliases = r["statefulset"][uname]["spec"]["template"]["spec"]["hostAliases"] + assert {"ip": "127.0.0.1", "hostnames": ["foo.local", "bar.local"]} in hostAliases + + +def test_network_policy(): + config = """ +networkPolicy: + http: + enabled: true + explicitNamespacesSelector: + # Accept from namespaces with all those different rules (from whitelisted Pods) + matchLabels: + role: frontend-http + matchExpressions: + - {key: role, operator: In, values: [frontend-http]} + additionalRules: + - podSelector: + matchLabels: + role: frontend-http + - podSelector: + matchExpressions: + - key: role + operator: In + values: + - frontend-http + transport: + enabled: true + allowExternal: true + explicitNamespacesSelector: + matchLabels: + role: frontend-transport + matchExpressions: + - {key: role, operator: In, values: [frontend-transport]} + additionalRules: + - podSelector: + matchLabels: + role: frontend-transport + - podSelector: + matchExpressions: + - key: role + operator: In + values: + - frontend-transport + +""" + r = helm_template(config) + ingress = r["networkpolicy"][uname]["spec"]["ingress"] + pod_selector = r["networkpolicy"][uname]["spec"]["podSelector"] + http = ingress[0] + transport = ingress[1] + assert http["from"] == [ + { + "podSelector": { + "matchLabels": {"elasticsearch-master-http-client": "true"} + }, + "namespaceSelector": { + "matchExpressions": [ + {"key": "role", "operator": "In", "values": ["frontend-http"]} + ], + "matchLabels": {"role": "frontend-http"}, + }, + }, + {"podSelector": {"matchLabels": {"role": "frontend-http"}}}, + { + "podSelector": { + "matchExpressions": [ + {"key": "role", "operator": "In", "values": ["frontend-http"]} + ] + } + }, + ] + assert http["ports"][0]["port"] == 9200 + assert transport["from"] == [ + { + "podSelector": { + "matchLabels": {"elasticsearch-master-transport-client": "true"} + }, + "namespaceSelector": { + "matchExpressions": [ + {"key": "role", "operator": "In", "values": ["frontend-transport"]} + ], + "matchLabels": {"role": "frontend-transport"}, + }, + }, + {"podSelector": {"matchLabels": {"role": "frontend-transport"}}}, + { + "podSelector": { + "matchExpressions": [ + {"key": "role", "operator": "In", "values": ["frontend-transport"]} + ] + } + }, + {"podSelector": {"matchLabels": {"app": "elasticsearch-master"}}}, + ] + assert transport["ports"][0]["port"] == 9300 + assert pod_selector == { + "matchLabels": { + "app": 
"elasticsearch-master", + } + } + + +def test_default_automount_sa_token(): + config = """ +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"][ + "automountServiceAccountToken" + ] + == True + ) + + +def test_disable_automount_sa_token(): + config = """ +rbac: + automountToken: false +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"][ + "automountServiceAccountToken" + ] + == False + ) diff --git a/ansible/roles/helm_install/files/elasticsearch/values.yaml b/ansible/roles/helm_install/files/elasticsearch/values.yaml new file mode 100644 index 0000000..04e509f --- /dev/null +++ b/ansible/roles/helm_install/files/elasticsearch/values.yaml @@ -0,0 +1,356 @@ +--- +clusterName: "elasticsearch" +nodeGroup: "master" + +# The service that non master groups will try to connect to when joining the cluster +# This should be set to clusterName + "-" + nodeGroup for your master group +masterService: "" + +# Elasticsearch roles that will be applied to this nodeGroup +# These will be set as environment variables. E.g. node.roles=master +# https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles +roles: + - master + - data + - data_content + - data_hot + - data_warm + - data_cold + - ingest + - ml + - remote_cluster_client + - transform + +replicas: 3 +minimumMasterNodes: 2 + +esMajorVersion: "" + +# Allows you to add any config files in /usr/share/elasticsearch/config/ +# such as elasticsearch.yml and log4j2.properties +esConfig: {} +# elasticsearch.yml: | +# key: +# nestedkey: value +# log4j2.properties: | +# key = value + +createCert: true + +esJvmOptions: {} +# processors.options: | +# -XX:ActiveProcessorCount=3 + +# Extra environment variables to append to this nodeGroup +# This will be appended to the current 'env:' key. You can use any of the kubernetes env +# syntax here +extraEnvs: [] +# - name: MY_ENVIRONMENT_VAR +# value: the_value_goes_here + +# Allows you to load environment variables from kubernetes secret or config map +envFrom: [] +# - secretRef: +# name: env-secret +# - configMapRef: +# name: config-map + +# Disable it to use your own elastic-credential Secret. 
+secret: + enabled: true + password: "" # generated randomly if not defined + +# A list of secrets and their paths to mount inside the pod +# This is useful for mounting certificates for security and for mounting +# the X-Pack license +secretMounts: [] +# - name: elastic-certificates +# secretName: elastic-certificates +# path: /usr/share/elasticsearch/config/certs +# defaultMode: 0755 + +hostAliases: [] +#- ip: "127.0.0.1" +# hostnames: +# - "foo.local" +# - "bar.local" + +image: "docker.elastic.co/elasticsearch/elasticsearch" +imageTag: "8.4.1" +imagePullPolicy: "IfNotPresent" + +podAnnotations: {} +# iam.amazonaws.com/role: es-cluster + +# additionals labels +labels: {} + +esJavaOpts: "" # example: "-Xmx1g -Xms1g" + +resources: + requests: + cpu: "1000m" + memory: "2Gi" + limits: + cpu: "1000m" + memory: "2Gi" + +initResources: {} +# limits: +# cpu: "25m" +# # memory: "128Mi" +# requests: +# cpu: "25m" +# memory: "128Mi" + +networkHost: "0.0.0.0" + +volumeClaimTemplate: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 30Gi + +rbac: + create: false + serviceAccountAnnotations: {} + serviceAccountName: "" + automountToken: true + +podSecurityPolicy: + create: false + name: "" + spec: + privileged: true + fsGroup: + rule: RunAsAny + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - secret + - configMap + - persistentVolumeClaim + - emptyDir + +persistence: + enabled: true + labels: + # Add default labels for the volumeClaimTemplate of the StatefulSet + enabled: false + annotations: {} + +extraVolumes: [] +# - name: extras +# emptyDir: {} + +extraVolumeMounts: [] +# - name: extras +# mountPath: /usr/share/extras +# readOnly: true + +extraContainers: [] +# - name: do-something +# image: busybox +# command: ['do', 'something'] + +extraInitContainers: [] +# - name: do-something +# image: busybox +# command: ['do', 'something'] + +# This is the PriorityClass settings as defined in +# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass +priorityClassName: "" + +# By default this will make sure two pods don't end up on the same node +# Changing this to a region would allow you to spread pods across regions +antiAffinityTopologyKey: "kubernetes.io/hostname" + +# Hard means that by default pods will only be scheduled if there are enough nodes for them +# and that they will never end up on the same node. Setting this to soft will do this "best effort" +antiAffinity: "hard" + +# This is the node affinity settings as defined in +# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature +nodeAffinity: {} + +# The default is to deploy all pods serially. By setting this to parallel all pods are started at +# the same time when bootstrapping the cluster +podManagementPolicy: "Parallel" + +# The environment variables injected by service links are not used, but can lead to slow Elasticsearch boot times when +# there are many services in the current namespace. +# If you experience slow pod startups you probably want to set this to `false`. 
+enableServiceLinks: true + +protocol: https +httpPort: 9200 +transportPort: 9300 + +service: + enabled: true + labels: {} + labelsHeadless: {} + type: ClusterIP + # Consider that all endpoints are considered "ready" even if the Pods themselves are not + # https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec + publishNotReadyAddresses: false + nodePort: "" + annotations: {} + httpPortName: http + transportPortName: transport + loadBalancerIP: "" + loadBalancerSourceRanges: [] + externalTrafficPolicy: "" + +updateStrategy: RollingUpdate + +# This is the max unavailable setting for the pod disruption budget +# The default value of 1 will make sure that kubernetes won't allow more than 1 +# of your pods to be unavailable during maintenance +maxUnavailable: 1 + +podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + +securityContext: + capabilities: + drop: + - ALL + # readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# How long to wait for elasticsearch to stop gracefully +terminationGracePeriod: 120 + +sysctlVmMaxMapCount: 262144 + +readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 3 + timeoutSeconds: 5 + +# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status +clusterHealthCheckParams: "wait_for_status=green&timeout=1s" + +## Use an alternate scheduler. +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" + +imagePullSecrets: [] +nodeSelector: {} +tolerations: [] + +# Enabling this will publicly expose your Elasticsearch instance. +# Only enable this if you have security enabled on your cluster +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + className: "nginx" + pathtype: ImplementationSpecific + hosts: + - host: chart-example.local + paths: + - path: / + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +nameOverride: "" +fullnameOverride: "" +healthNameOverride: "" + +lifecycle: {} +# preStop: +# exec: +# command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] +# postStart: +# exec: +# command: +# - bash +# - -c +# - | +# #!/bin/bash +# # Add a template to adjust number of shards/replicas +# TEMPLATE_NAME=my_template +# INDEX_PATTERN="logstash-*" +# SHARD_COUNT=8 +# REPLICA_COUNT=1 +# ES_URL=http://localhost:9200 +# while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done +# curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}' + +sysctlInitContainer: + enabled: true + +keystore: [] + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## In order for a Pod to access Elasticsearch, it needs to have the following label: + ## {{ template "uname" . 
}}-client: "true" + ## Example for default configuration to access HTTP port: + ## elasticsearch-master-http-client: "true" + ## Example for default configuration to access transport port: + ## elasticsearch-master-transport-client: "true" + + http: + enabled: false + ## if explicitNamespacesSelector is not set or set to {}, only client Pods being in the networkPolicy's namespace + ## and matching all criteria can reach the DB. + ## But sometimes, we want the Pods to be accessible to clients from other namespaces, in this case, we can use this + ## parameter to select these namespaces + ## + # explicitNamespacesSelector: + # # Accept from namespaces with all those different rules (only from whitelisted Pods) + # matchLabels: + # role: frontend + # matchExpressions: + # - {key: role, operator: In, values: [frontend]} + + ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. + ## + # additionalRules: + # - podSelector: + # matchLabels: + # role: frontend + # - podSelector: + # matchExpressions: + # - key: role + # operator: In + # values: + # - frontend + + transport: + ## Note that all Elasticsearch Pods can talk to themselves using transport port even if enabled. + enabled: false + # explicitNamespacesSelector: + # matchLabels: + # role: frontend + # matchExpressions: + # - {key: role, operator: In, values: [frontend]} + # additionalRules: + # - podSelector: + # matchLabels: + # role: frontend + # - podSelector: + # matchExpressions: + # - key: role + # operator: In + # values: + # - frontend + +tests: + enabled: true diff --git a/ansible/roles/helm_install/files/ingress-nginx/.helmignore b/ansible/roles/helm_install/files/ingress-nginx/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/roles/helm_install/files/ingress-nginx/CHANGELOG.md b/ansible/roles/helm_install/files/ingress-nginx/CHANGELOG.md new file mode 100644 index 0000000..27a52e8 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/CHANGELOG.md @@ -0,0 +1,445 @@ +# Changelog + +This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org). 
+ +### 4.2.1 + +- The sha of kube-webhook-certgen image & the opentelemetry image, in values file, was changed to new images built on alpine-v3.16.1 +- "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today" + +### 4.2.0 + +- Support for Kubernetes v1.19.0 was removed +- "[8810](https://github.com/kubernetes/ingress-nginx/pull/8810) Prepare for v1.3.0" +- "[8808](https://github.com/kubernetes/ingress-nginx/pull/8808) revert arch var name" +- "[8805](https://github.com/kubernetes/ingress-nginx/pull/8805) Bump k8s.io/klog/v2 from 2.60.1 to 2.70.1" +- "[8803](https://github.com/kubernetes/ingress-nginx/pull/8803) Update to nginx base with alpine v3.16" +- "[8802](https://github.com/kubernetes/ingress-nginx/pull/8802) chore: start v1.3.0 release process" +- "[8798](https://github.com/kubernetes/ingress-nginx/pull/8798) Add v1.24.0 to test matrix" +- "[8796](https://github.com/kubernetes/ingress-nginx/pull/8796) fix: add MAC_OS variable for static-check" +- "[8793](https://github.com/kubernetes/ingress-nginx/pull/8793) changed to alpine-v3.16" +- "[8781](https://github.com/kubernetes/ingress-nginx/pull/8781) Bump github.com/stretchr/testify from 1.7.5 to 1.8.0" +- "[8778](https://github.com/kubernetes/ingress-nginx/pull/8778) chore: remove stable.txt from release process" +- "[8775](https://github.com/kubernetes/ingress-nginx/pull/8775) Remove stable" +- "[8773](https://github.com/kubernetes/ingress-nginx/pull/8773) Bump github/codeql-action from 2.1.14 to 2.1.15" +- "[8772](https://github.com/kubernetes/ingress-nginx/pull/8772) Bump ossf/scorecard-action from 1.1.1 to 1.1.2" +- "[8771](https://github.com/kubernetes/ingress-nginx/pull/8771) fix bullet md format" +- "[8770](https://github.com/kubernetes/ingress-nginx/pull/8770) Add condition for monitoring.coreos.com/v1 API" +- "[8769](https://github.com/kubernetes/ingress-nginx/pull/8769) Fix typos and add links to developer guide" +- "[8767](https://github.com/kubernetes/ingress-nginx/pull/8767) change v1.2.0 to v1.2.1 in deploy doc URLs" +- "[8765](https://github.com/kubernetes/ingress-nginx/pull/8765) Bump github/codeql-action from 1.0.26 to 2.1.14" +- "[8752](https://github.com/kubernetes/ingress-nginx/pull/8752) Bump github.com/spf13/cobra from 1.4.0 to 1.5.0" +- "[8751](https://github.com/kubernetes/ingress-nginx/pull/8751) Bump github.com/stretchr/testify from 1.7.2 to 1.7.5" +- "[8750](https://github.com/kubernetes/ingress-nginx/pull/8750) added announcement" +- "[8740](https://github.com/kubernetes/ingress-nginx/pull/8740) change sha e2etestrunner and echoserver" +- "[8738](https://github.com/kubernetes/ingress-nginx/pull/8738) Update docs to make it easier for noobs to follow step by step" +- "[8737](https://github.com/kubernetes/ingress-nginx/pull/8737) updated baseimage sha" +- "[8736](https://github.com/kubernetes/ingress-nginx/pull/8736) set ld-musl-path" +- "[8733](https://github.com/kubernetes/ingress-nginx/pull/8733) feat: migrate leaderelection lock to leases" +- "[8726](https://github.com/kubernetes/ingress-nginx/pull/8726) prometheus metric: upstream_latency_seconds" +- "[8720](https://github.com/kubernetes/ingress-nginx/pull/8720) Ci pin deps" +- "[8719](https://github.com/kubernetes/ingress-nginx/pull/8719) Working OpenTelemetry sidecar (base nginx image)" +- "[8714](https://github.com/kubernetes/ingress-nginx/pull/8714) Create Openssf scorecard" +- "[8708](https://github.com/kubernetes/ingress-nginx/pull/8708) Bump github.com/prometheus/common from 0.34.0 to 0.35.0" +- 
"[8703](https://github.com/kubernetes/ingress-nginx/pull/8703) Bump actions/dependency-review-action from 1 to 2" +- "[8701](https://github.com/kubernetes/ingress-nginx/pull/8701) Fix several typos" +- "[8699](https://github.com/kubernetes/ingress-nginx/pull/8699) fix the gosec test and a make target for it" +- "[8698](https://github.com/kubernetes/ingress-nginx/pull/8698) Bump actions/upload-artifact from 2.3.1 to 3.1.0" +- "[8697](https://github.com/kubernetes/ingress-nginx/pull/8697) Bump actions/setup-go from 2.2.0 to 3.2.0" +- "[8695](https://github.com/kubernetes/ingress-nginx/pull/8695) Bump actions/download-artifact from 2 to 3" +- "[8694](https://github.com/kubernetes/ingress-nginx/pull/8694) Bump crazy-max/ghaction-docker-buildx from 1.6.2 to 3.3.1" + +### 4.1.2 + +- "[8587](https://github.com/kubernetes/ingress-nginx/pull/8587) Add CAP_SYS_CHROOT to DS/PSP when needed" +- "[8458](https://github.com/kubernetes/ingress-nginx/pull/8458) Add portNamePreffix Helm chart parameter" +- "[8522](https://github.com/kubernetes/ingress-nginx/pull/8522) Add documentation for controller.service.loadBalancerIP in Helm chart" + +### 4.1.0 + +- "[8481](https://github.com/kubernetes/ingress-nginx/pull/8481) Fix log creation in chroot script" +- "[8479](https://github.com/kubernetes/ingress-nginx/pull/8479) changed nginx base img tag to img built with alpine3.14.6" +- "[8478](https://github.com/kubernetes/ingress-nginx/pull/8478) update base images and protobuf gomod" +- "[8468](https://github.com/kubernetes/ingress-nginx/pull/8468) Fallback to ngx.var.scheme for redirectScheme with use-forward-headers when X-Forwarded-Proto is empty" +- "[8456](https://github.com/kubernetes/ingress-nginx/pull/8456) Implement object deep inspector" +- "[8455](https://github.com/kubernetes/ingress-nginx/pull/8455) Update dependencies" +- "[8454](https://github.com/kubernetes/ingress-nginx/pull/8454) Update index.md" +- "[8447](https://github.com/kubernetes/ingress-nginx/pull/8447) typo fixing" +- "[8446](https://github.com/kubernetes/ingress-nginx/pull/8446) Fix suggested annotation-value-word-blocklist" +- "[8444](https://github.com/kubernetes/ingress-nginx/pull/8444) replace deprecated topology key in example with current one" +- "[8443](https://github.com/kubernetes/ingress-nginx/pull/8443) Add dependency review enforcement" +- "[8434](https://github.com/kubernetes/ingress-nginx/pull/8434) added new auth-tls-match-cn annotation" +- "[8426](https://github.com/kubernetes/ingress-nginx/pull/8426) Bump github.com/prometheus/common from 0.32.1 to 0.33.0" + +### 4.0.18 + +- "[8291](https://github.com/kubernetes/ingress-nginx/pull/8291) remove git tag env from cloud build" +- "[8286](https://github.com/kubernetes/ingress-nginx/pull/8286) Fix OpenTelemetry sidecar image build" +- "[8277](https://github.com/kubernetes/ingress-nginx/pull/8277) Add OpenSSF Best practices badge" +- "[8273](https://github.com/kubernetes/ingress-nginx/pull/8273) Issue#8241" +- "[8267](https://github.com/kubernetes/ingress-nginx/pull/8267) Add fsGroup value to admission-webhooks/job-patch charts" +- "[8262](https://github.com/kubernetes/ingress-nginx/pull/8262) Updated confusing error" +- "[8256](https://github.com/kubernetes/ingress-nginx/pull/8256) fix: deny locations with invalid auth-url annotation" +- "[8253](https://github.com/kubernetes/ingress-nginx/pull/8253) Add a certificate info metric" +- "[8236](https://github.com/kubernetes/ingress-nginx/pull/8236) webhook: remove useless code." 
+- "[8227](https://github.com/kubernetes/ingress-nginx/pull/8227) Update libraries in webhook image" +- "[8225](https://github.com/kubernetes/ingress-nginx/pull/8225) fix inconsistent-label-cardinality for prometheus metrics: nginx_ingress_controller_requests" +- "[8221](https://github.com/kubernetes/ingress-nginx/pull/8221) Do not validate ingresses with unknown ingress class in admission webhook endpoint" +- "[8210](https://github.com/kubernetes/ingress-nginx/pull/8210) Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1" +- "[8209](https://github.com/kubernetes/ingress-nginx/pull/8209) Bump google.golang.org/grpc from 1.43.0 to 1.44.0" +- "[8204](https://github.com/kubernetes/ingress-nginx/pull/8204) Add Artifact Hub lint" +- "[8203](https://github.com/kubernetes/ingress-nginx/pull/8203) Fix Indentation of example and link to cert-manager tutorial" +- "[8201](https://github.com/kubernetes/ingress-nginx/pull/8201) feat(metrics): add path and method labels to requests countera" +- "[8199](https://github.com/kubernetes/ingress-nginx/pull/8199) use functional options to reduce number of methods creating an EchoDeployment" +- "[8196](https://github.com/kubernetes/ingress-nginx/pull/8196) docs: fix inconsistent controller annotation" +- "[8191](https://github.com/kubernetes/ingress-nginx/pull/8191) Using Go install for misspell" +- "[8186](https://github.com/kubernetes/ingress-nginx/pull/8186) prometheus+grafana using servicemonitor" +- "[8185](https://github.com/kubernetes/ingress-nginx/pull/8185) Append elements on match, instead of removing for cors-annotations" +- "[8179](https://github.com/kubernetes/ingress-nginx/pull/8179) Bump github.com/opencontainers/runc from 1.0.3 to 1.1.0" +- "[8173](https://github.com/kubernetes/ingress-nginx/pull/8173) Adding annotations to the controller service account" +- "[8163](https://github.com/kubernetes/ingress-nginx/pull/8163) Update the $req_id placeholder description" +- "[8162](https://github.com/kubernetes/ingress-nginx/pull/8162) Versioned static manifests" +- "[8159](https://github.com/kubernetes/ingress-nginx/pull/8159) Adding some geoip variables and default values" +- "[8155](https://github.com/kubernetes/ingress-nginx/pull/8155) #7271 feat: avoid-pdb-creation-when-default-backend-disabled-and-replicas-gt-1" +- "[8151](https://github.com/kubernetes/ingress-nginx/pull/8151) Automatically generate helm docs" +- "[8143](https://github.com/kubernetes/ingress-nginx/pull/8143) Allow to configure delay before controller exits" +- "[8136](https://github.com/kubernetes/ingress-nginx/pull/8136) add ingressClass option to helm chart - back compatibility with ingress.class annotations" +- "[8126](https://github.com/kubernetes/ingress-nginx/pull/8126) Example for JWT" + + +### 4.0.15 + +- [8120] https://github.com/kubernetes/ingress-nginx/pull/8120 Update go in runner and release v1.1.1 +- [8119] https://github.com/kubernetes/ingress-nginx/pull/8119 Update to go v1.17.6 +- [8118] https://github.com/kubernetes/ingress-nginx/pull/8118 Remove deprecated libraries, update other libs +- [8117] https://github.com/kubernetes/ingress-nginx/pull/8117 Fix codegen errors +- [8115] https://github.com/kubernetes/ingress-nginx/pull/8115 chart/ghaction: set the correct permission to have access to push a release +- [8098] https://github.com/kubernetes/ingress-nginx/pull/8098 generating SHA for CA only certs in backend_ssl.go + comparision of P… +- [8088] https://github.com/kubernetes/ingress-nginx/pull/8088 Fix Edit this page link to use main branch +- [8072] 
https://github.com/kubernetes/ingress-nginx/pull/8072 Expose GeoIP2 Continent code as variable +- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 docs(charts): using helm-docs for chart +- [8058] https://github.com/kubernetes/ingress-nginx/pull/8058 Bump github.com/spf13/cobra from 1.2.1 to 1.3.0 +- [8054] https://github.com/kubernetes/ingress-nginx/pull/8054 Bump google.golang.org/grpc from 1.41.0 to 1.43.0 +- [8051] https://github.com/kubernetes/ingress-nginx/pull/8051 align bug report with feature request regarding kind documentation +- [8046] https://github.com/kubernetes/ingress-nginx/pull/8046 Report expired certificates (#8045) +- [8044] https://github.com/kubernetes/ingress-nginx/pull/8044 remove G109 check till gosec resolves issues +- [8042] https://github.com/kubernetes/ingress-nginx/pull/8042 docs_multiple_instances_one_cluster_ticket_7543 +- [8041] https://github.com/kubernetes/ingress-nginx/pull/8041 docs: fix typo'd executible name +- [8035] https://github.com/kubernetes/ingress-nginx/pull/8035 Comment busy owners +- [8029] https://github.com/kubernetes/ingress-nginx/pull/8029 Add stream-snippet as a ConfigMap and Annotation option +- [8023] https://github.com/kubernetes/ingress-nginx/pull/8023 fix nginx compilation flags +- [8021] https://github.com/kubernetes/ingress-nginx/pull/8021 Disable default modsecurity_rules_file if modsecurity-snippet is specified +- [8019] https://github.com/kubernetes/ingress-nginx/pull/8019 Revise main documentation page +- [8018] https://github.com/kubernetes/ingress-nginx/pull/8018 Preserve order of plugin invocation +- [8015] https://github.com/kubernetes/ingress-nginx/pull/8015 Add newline indenting to admission webhook annotations +- [8014] https://github.com/kubernetes/ingress-nginx/pull/8014 Add link to example error page manifest in docs +- [8009] https://github.com/kubernetes/ingress-nginx/pull/8009 Fix spelling in documentation and top-level files +- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml +- [8003] https://github.com/kubernetes/ingress-nginx/pull/8003 Minor improvements (formatting, consistency) in install guide +- [8001] https://github.com/kubernetes/ingress-nginx/pull/8001 fix: go-grpc Dockerfile +- [7999] https://github.com/kubernetes/ingress-nginx/pull/7999 images: use k8s-staging-test-infra/gcb-docker-gcloud +- [7996] https://github.com/kubernetes/ingress-nginx/pull/7996 doc: improvement +- [7983] https://github.com/kubernetes/ingress-nginx/pull/7983 Fix a couple of misspellings in the annotations documentation. 
+- [7979] https://github.com/kubernetes/ingress-nginx/pull/7979 allow set annotations for admission Jobs +- [7977] https://github.com/kubernetes/ingress-nginx/pull/7977 Add ssl_reject_handshake to defaul server +- [7975] https://github.com/kubernetes/ingress-nginx/pull/7975 add legacy version update v0.50.0 to main changelog +- [7972] https://github.com/kubernetes/ingress-nginx/pull/7972 updated service upstream definition + +### 4.0.14 + +- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 Using helm-docs to populate values table in README.md + +### 4.0.13 + +- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml + +### 4.0.12 + +- [7978] https://github.com/kubernetes/ingress-nginx/pull/7979 Support custom annotations in admissions Jobs + +### 4.0.11 + +- [7873] https://github.com/kubernetes/ingress-nginx/pull/7873 Makes the [appProtocol](https://kubernetes.io/docs/concepts/services-networking/_print/#application-protocol) field optional. + +### 4.0.10 + +- [7964] https://github.com/kubernetes/ingress-nginx/pull/7964 Update controller version to v1.1.0 + +### 4.0.9 + +- [6992] https://github.com/kubernetes/ingress-nginx/pull/6992 Add ability to specify labels for all resources + +### 4.0.7 + +- [7923] https://github.com/kubernetes/ingress-nginx/pull/7923 Release v1.0.5 of ingress-nginx +- [7806] https://github.com/kubernetes/ingress-nginx/pull/7806 Choice option for internal/external loadbalancer type service + +### 4.0.6 + +- [7804] https://github.com/kubernetes/ingress-nginx/pull/7804 Release v1.0.4 of ingress-nginx +- [7651] https://github.com/kubernetes/ingress-nginx/pull/7651 Support ipFamilyPolicy and ipFamilies fields in Helm Chart +- [7798] https://github.com/kubernetes/ingress-nginx/pull/7798 Exoscale: use HTTP Healthcheck mode +- [7793] https://github.com/kubernetes/ingress-nginx/pull/7793 Update kube-webhook-certgen to v1.1.1 + +### 4.0.5 + +- [7740] https://github.com/kubernetes/ingress-nginx/pull/7740 Release v1.0.3 of ingress-nginx + +### 4.0.3 + +- [7707] https://github.com/kubernetes/ingress-nginx/pull/7707 Release v1.0.2 of ingress-nginx + +### 4.0.2 + +- [7681] https://github.com/kubernetes/ingress-nginx/pull/7681 Release v1.0.1 of ingress-nginx + +### 4.0.1 + +- [7535] https://github.com/kubernetes/ingress-nginx/pull/7535 Release v1.0.0 ingress-nginx + +### 3.34.0 + +- [7256] https://github.com/kubernetes/ingress-nginx/pull/7256 Add namespace field in the namespace scoped resource templates + +### 3.33.0 + +- [7164] https://github.com/kubernetes/ingress-nginx/pull/7164 Update nginx to v1.20.1 + +### 3.32.0 + +- [7117] https://github.com/kubernetes/ingress-nginx/pull/7117 Add annotations for HPA + +### 3.31.0 + +- [7137] https://github.com/kubernetes/ingress-nginx/pull/7137 Add support for custom probes + +### 3.30.0 + +- [#7092](https://github.com/kubernetes/ingress-nginx/pull/7092) Removes the possibility of using localhost in ExternalNames as endpoints + +### 3.29.0 + +- [X] [#6945](https://github.com/kubernetes/ingress-nginx/pull/7020) Add option to specify job label for ServiceMonitor + +### 3.28.0 + +- [ ] [#6900](https://github.com/kubernetes/ingress-nginx/pull/6900) Support existing PSPs + +### 3.27.0 + +- Update ingress-nginx v0.45.0 + +### 3.26.0 + +- [X] [#6979](https://github.com/kubernetes/ingress-nginx/pull/6979) Changed servicePort value for metrics + +### 3.25.0 + +- [X] [#6957](https://github.com/kubernetes/ingress-nginx/pull/6957) Add ability to specify automountServiceAccountToken + 
+### 3.24.0 + +- [X] [#6908](https://github.com/kubernetes/ingress-nginx/pull/6908) Add volumes to default-backend deployment + +### 3.23.0 + +- Update ingress-nginx v0.44.0 + +### 3.22.0 + +- [X] [#6802](https://github.com/kubernetes/ingress-nginx/pull/6802) Add value for configuring a custom Diffie-Hellman parameters file +- [X] [#6815](https://github.com/kubernetes/ingress-nginx/pull/6815) Allow use of numeric namespaces in helm chart + +### 3.21.0 + +- [X] [#6783](https://github.com/kubernetes/ingress-nginx/pull/6783) Add custom annotations to ScaledObject +- [X] [#6761](https://github.com/kubernetes/ingress-nginx/pull/6761) Adding quotes in the serviceAccount name in Helm values +- [X] [#6767](https://github.com/kubernetes/ingress-nginx/pull/6767) Remove ClusterRole when scope option is enabled +- [X] [#6785](https://github.com/kubernetes/ingress-nginx/pull/6785) Update kube-webhook-certgen image to v1.5.1 + +### 3.20.1 + +- Do not create KEDA in case of DaemonSets. +- Fix KEDA v2 definition + +### 3.20.0 + +- [X] [#6730](https://github.com/kubernetes/ingress-nginx/pull/6730) Do not create HPA for defaultBackend if not enabled. + +### 3.19.0 + +- Update ingress-nginx v0.43.0 + +### 3.18.0 + +- [X] [#6688](https://github.com/kubernetes/ingress-nginx/pull/6688) Allow volume-type emptyDir in controller podsecuritypolicy +- [X] [#6691](https://github.com/kubernetes/ingress-nginx/pull/6691) Improve parsing of helm parameters + +### 3.17.0 + +- Update ingress-nginx v0.42.0 + +### 3.16.1 + +- Fix chart-releaser action + +### 3.16.0 + +- [X] [#6646](https://github.com/kubernetes/ingress-nginx/pull/6646) Added LoadBalancerIP value for internal service + +### 3.15.1 + +- Fix chart-releaser action + +### 3.15.0 + +- [X] [#6586](https://github.com/kubernetes/ingress-nginx/pull/6586) Fix 'maxmindLicenseKey' location in values.yaml + +### 3.14.0 + +- [X] [#6469](https://github.com/kubernetes/ingress-nginx/pull/6469) Allow custom service names for controller and backend + +### 3.13.0 + +- [X] [#6544](https://github.com/kubernetes/ingress-nginx/pull/6544) Fix default backend HPA name variable + +### 3.12.0 + +- [X] [#6514](https://github.com/kubernetes/ingress-nginx/pull/6514) Remove helm2 support and update docs + +### 3.11.1 + +- [X] [#6505](https://github.com/kubernetes/ingress-nginx/pull/6505) Reorder HPA resource list to work with GitOps tooling + +### 3.11.0 + +- Support Keda Autoscaling + +### 3.10.1 + +- Fix regression introduced in 0.41.0 with external authentication + +### 3.10.0 + +- Fix routing regression introduced in 0.41.0 with PathType Exact + +### 3.9.0 + +- [X] [#6423](https://github.com/kubernetes/ingress-nginx/pull/6423) Add Default backend HPA autoscaling + +### 3.8.0 + +- [X] [#6395](https://github.com/kubernetes/ingress-nginx/pull/6395) Update jettech/kube-webhook-certgen image +- [X] [#6377](https://github.com/kubernetes/ingress-nginx/pull/6377) Added loadBalancerSourceRanges for internal lbs +- [X] [#6356](https://github.com/kubernetes/ingress-nginx/pull/6356) Add securitycontext settings on defaultbackend +- [X] [#6401](https://github.com/kubernetes/ingress-nginx/pull/6401) Fix controller service annotations +- [X] [#6403](https://github.com/kubernetes/ingress-nginx/pull/6403) Initial helm chart changelog + +### 3.7.1 + +- [X] [#6326](https://github.com/kubernetes/ingress-nginx/pull/6326) Fix liveness and readiness probe path in daemonset chart + +### 3.7.0 + +- [X] [#6316](https://github.com/kubernetes/ingress-nginx/pull/6316) Numerals in podAnnotations in quotes 
[#6315](https://github.com/kubernetes/ingress-nginx/issues/6315) + +### 3.6.0 + +- [X] [#6305](https://github.com/kubernetes/ingress-nginx/pull/6305) Add default linux nodeSelector + +### 3.5.1 + +- [X] [#6299](https://github.com/kubernetes/ingress-nginx/pull/6299) Fix helm chart release + +### 3.5.0 + +- [X] [#6260](https://github.com/kubernetes/ingress-nginx/pull/6260) Allow Helm Chart to customize admission webhook's annotations, timeoutSeconds, namespaceSelector, objectSelector and cert files locations + +### 3.4.0 + +- [X] [#6268](https://github.com/kubernetes/ingress-nginx/pull/6268) Update to 0.40.2 in helm chart #6288 + +### 3.3.1 + +- [X] [#6259](https://github.com/kubernetes/ingress-nginx/pull/6259) Release helm chart +- [X] [#6258](https://github.com/kubernetes/ingress-nginx/pull/6258) Fix chart markdown link +- [X] [#6253](https://github.com/kubernetes/ingress-nginx/pull/6253) Release v0.40.0 + +### 3.3.1 + +- [X] [#6233](https://github.com/kubernetes/ingress-nginx/pull/6233) Add admission controller e2e test + +### 3.3.0 + +- [X] [#6203](https://github.com/kubernetes/ingress-nginx/pull/6203) Refactor parsing of key values +- [X] [#6162](https://github.com/kubernetes/ingress-nginx/pull/6162) Add helm chart options to expose metrics service as NodePort +- [X] [#6180](https://github.com/kubernetes/ingress-nginx/pull/6180) Fix helm chart admissionReviewVersions regression +- [X] [#6169](https://github.com/kubernetes/ingress-nginx/pull/6169) Fix Typo in example prometheus rules + +### 3.0.0 + +- [X] [#6167](https://github.com/kubernetes/ingress-nginx/pull/6167) Update chart requirements + +### 2.16.0 + +- [X] [#6154](https://github.com/kubernetes/ingress-nginx/pull/6154) add `topologySpreadConstraint` to controller + +### 2.15.0 + +- [X] [#6087](https://github.com/kubernetes/ingress-nginx/pull/6087) Adding parameter for externalTrafficPolicy in internal controller service spec + +### 2.14.0 + +- [X] [#6104](https://github.com/kubernetes/ingress-nginx/pull/6104) Misc fixes for nginx-ingress chart for better keel and prometheus-operator integration + +### 2.13.0 + +- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0 + +### 2.13.0 + +- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0 +- [X] [#6080](https://github.com/kubernetes/ingress-nginx/pull/6080) Switch images to k8s.gcr.io after Vanity Domain Flip + +### 2.12.1 + +- [X] [#6075](https://github.com/kubernetes/ingress-nginx/pull/6075) Sync helm chart affinity examples + +### 2.12.0 + +- [X] [#6039](https://github.com/kubernetes/ingress-nginx/pull/6039) Add configurable serviceMonitor metricRelabelling and targetLabels +- [X] [#6044](https://github.com/kubernetes/ingress-nginx/pull/6044) Fix YAML linting + +### 2.11.3 + +- [X] [#6038](https://github.com/kubernetes/ingress-nginx/pull/6038) Bump chart version PATCH + +### 2.11.2 + +- [X] [#5951](https://github.com/kubernetes/ingress-nginx/pull/5951) Bump chart patch version + +### 2.11.1 + +- [X] [#5900](https://github.com/kubernetes/ingress-nginx/pull/5900) Release helm chart for v0.34.1 + +### 2.11.0 + +- [X] [#5879](https://github.com/kubernetes/ingress-nginx/pull/5879) Update helm chart for v0.34.0 +- [X] [#5671](https://github.com/kubernetes/ingress-nginx/pull/5671) Make liveness probe more fault tolerant than readiness probe + +### 2.10.0 + +- [X] [#5843](https://github.com/kubernetes/ingress-nginx/pull/5843) Update jettech/kube-webhook-certgen image + +### 2.9.1 + +- [X] 
[#5823](https://github.com/kubernetes/ingress-nginx/pull/5823) Add quoting to sysctls because numeric values need to be presented as strings (#5823) + +### 2.9.0 + +- [X] [#5795](https://github.com/kubernetes/ingress-nginx/pull/5795) Use fully qualified images to avoid cri-o issues + + +### TODO + +Keep building the changelog using *git log charts* checking the tag diff --git a/ansible/roles/helm_install/files/ingress-nginx/Chart.yaml b/ansible/roles/helm_install/files/ingress-nginx/Chart.yaml new file mode 100644 index 0000000..55c0b54 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + artifacthub.io/changes: | + - "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today" + - "fix permissions about configmap" + artifacthub.io/prerelease: "false" +apiVersion: v2 +appVersion: 1.3.1 +description: Ingress controller for Kubernetes using NGINX as a reverse proxy and + load balancer +home: https://github.com/kubernetes/ingress-nginx +icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png +keywords: +- ingress +- nginx +kubeVersion: '>=1.20.0-0' +maintainers: +- name: rikatz +- name: strongjz +- name: tao12345666333 +name: ingress-nginx +sources: +- https://github.com/kubernetes/ingress-nginx +version: 4.2.5 diff --git a/ansible/roles/helm_install/files/ingress-nginx/OWNERS b/ansible/roles/helm_install/files/ingress-nginx/OWNERS new file mode 100644 index 0000000..6b7e049 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md + +approvers: +- ingress-nginx-helm-maintainers + +reviewers: +- ingress-nginx-helm-reviewers + +labels: +- area/helm diff --git a/ansible/roles/helm_install/files/ingress-nginx/README.md b/ansible/roles/helm_install/files/ingress-nginx/README.md new file mode 100644 index 0000000..4e6a696 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/README.md @@ -0,0 +1,494 @@ +# ingress-nginx + +[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + +![Version: 4.2.5](https://img.shields.io/badge/Version-4.2.5-informational?style=flat-square) ![AppVersion: 1.3.1](https://img.shields.io/badge/AppVersion-1.3.1-informational?style=flat-square) + +To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Chart version 3.x.x: Kubernetes v1.16+ +- Chart version 4.x.x and above: Kubernetes v1.19+ + +## Get Repo Info + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +``` + +## Install Chart + +**Important:** only helm3 is supported + +```console +helm install [RELEASE_NAME] ingress-nginx/ingress-nginx +``` + +The command deploys ingress-nginx on the Kubernetes cluster in the default configuration. + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. 
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+## Upgrading Chart
+
+```console
+helm upgrade [RELEASE_NAME] [CHART] --install
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+### Upgrading With Zero Downtime in Production
+
+By default the ingress-nginx controller has service interruptions whenever its pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8).
+
+### Migrating from stable/nginx-ingress
+
+There are two main ways to migrate a release from the `stable/nginx-ingress` chart to the `ingress-nginx/ingress-nginx` chart:
+
+1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one
+1. For critical services in production that require zero downtime, you will want to:
+    1. [Install](#install-chart) a second Ingress controller
+    1. Redirect your DNS traffic from the old controller to the new controller
+    1. Log traffic from both controllers during this changeover
+    1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it
+    1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production)
+
+Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts.
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
+
+```console
+helm show values ingress-nginx/ingress-nginx
+```
+
+### PodDisruptionBudget
+
+Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one; otherwise it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info.
+
+### Prometheus Metrics
+
+The Nginx ingress controller can export Prometheus metrics by setting `controller.metrics.enabled` to `true`.
+
+You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`.
+Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled` and set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`; `release=prometheus` should match the label configured on your Prometheus ServiceMonitor (see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`).
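+
+For illustration, a values snippet that enables both the metrics endpoint and ServiceMonitor creation might look like the following. This is a sketch based on the keys described above; the `release: prometheus` label is an assumption and must match the label your Prometheus Operator installation selects on.
+
+```yaml
+controller:
+  metrics:
+    # Expose Prometheus metrics from the controller
+    enabled: true
+    serviceMonitor:
+      # Create a ServiceMonitor resource for the Prometheus Operator
+      enabled: true
+      additionalLabels:
+        # Illustrative: must match your Prometheus release's selector
+        release: prometheus
+```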
"release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`) + +### ingress-nginx nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: + +- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. + You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server + +### ExternalDNS Service Configuration + +Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. +``` + +### AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +### AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +### Additional Internal Load Balancer + +This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application. + +By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL. + +You'll need to set both the following values: + +`controller.service.internal.enabled` +`controller.service.internal.annotations` + +If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken. + +`controller.service.internal.annotations` varies with the cloud service you're using. + +Example for AWS: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal ELB + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + # Any other annotation can be declared here. 
+
+Example for GCE:
+
+```yaml
+controller:
+  service:
+    internal:
+      enabled: true
+      annotations:
+        # Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
+        # For GKE versions 1.17 and later
+        networking.gke.io/load-balancer-type: "Internal"
+        # For earlier versions
+        # cloud.google.com/load-balancer-type: "Internal"
+
+        # Any other annotation can be declared here.
+```
+
+Example for Azure:
+
+```yaml
+controller:
+  service:
+    internal:
+      enabled: true
+      annotations:
+        # Create internal LB
+        service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+        # Any other annotation can be declared here.
+```
+
+Example for Oracle Cloud Infrastructure:
+
+```yaml
+controller:
+  service:
+    internal:
+      enabled: true
+      annotations:
+        # Create internal LB
+        service.beta.kubernetes.io/oci-load-balancer-internal: "true"
+        # Any other annotation can be declared here.
+```
+
+A use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one Kubernetes ingress object.
+
+Optionally, you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`.
+
+### Ingress Admission Webhooks
+
+With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that integrates with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress resources from being added to the cluster.
+**This feature is enabled by default since 0.31.0.**
+
+nginx-ingress-controller 0.25.* works only with Kubernetes 1.14+; 0.26 fixed [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521).
+
+### Helm Error When Upgrading: spec.clusterIP: Invalid value: ""
+
+If you are upgrading this chart from a version between 0.31.0 and 1.2.2, then you may get an error like this:
+
+```console
+Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable
+```
+
+Details of how and why are in [this issue](https://github.com/helm/charts/pull/13646), but to resolve this you can set `xxxx.service.omitClusterIP` to `true`, where `xxxx` is the service referenced in the error.
+
+As of version `1.26.0` of this chart, simply not providing any clusterIP value means that `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur, since `clusterIP: ""` will not be rendered.
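+
+On affected chart versions (prior to `1.26.0`), the workaround could be applied like this minimal sketch, which assumes the controller service is the one named in the error:
+
+```console
+helm upgrade [RELEASE_NAME] ingress-nginx/ingress-nginx --set controller.service.omitClusterIP=true
+```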
+ +## Requirements + +Kubernetes: `>=1.20.0-0` + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| commonLabels | object | `{}` | | +| controller.addHeaders | object | `{}` | Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers | +| controller.admissionWebhooks.annotations | object | `{}` | | +| controller.admissionWebhooks.certificate | string | `"/usr/local/certificates/cert"` | | +| controller.admissionWebhooks.createSecretJob.resources | object | `{}` | | +| controller.admissionWebhooks.enabled | bool | `true` | | +| controller.admissionWebhooks.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| controller.admissionWebhooks.extraEnvs | list | `[]` | Additional environment variables to set | +| controller.admissionWebhooks.failurePolicy | string | `"Fail"` | Admission Webhook failure policy to use | +| controller.admissionWebhooks.key | string | `"/usr/local/certificates/key"` | | +| controller.admissionWebhooks.labels | object | `{}` | Labels to be added to admission webhooks | +| controller.admissionWebhooks.namespaceSelector | object | `{}` | | +| controller.admissionWebhooks.networkPolicyEnabled | bool | `false` | | +| controller.admissionWebhooks.objectSelector | object | `{}` | | +| controller.admissionWebhooks.patch.enabled | bool | `true` | | +| controller.admissionWebhooks.patch.image.digest | string | `"sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47"` | | +| controller.admissionWebhooks.patch.image.image | string | `"ingress-nginx/kube-webhook-certgen"` | | +| controller.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.admissionWebhooks.patch.image.registry | string | `"registry.k8s.io"` | | +| controller.admissionWebhooks.patch.image.tag | string | `"v1.3.0"` | | +| controller.admissionWebhooks.patch.labels | object | `{}` | Labels to be added to patch job resources | +| controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os" | string | `"linux"` | | +| controller.admissionWebhooks.patch.podAnnotations | object | `{}` | | +| controller.admissionWebhooks.patch.priorityClassName | string | `""` | Provide a priority class name to the webhook patching job # | +| controller.admissionWebhooks.patch.securityContext.fsGroup | int | `2000` | | +| controller.admissionWebhooks.patch.securityContext.runAsNonRoot | bool | `true` | | +| controller.admissionWebhooks.patch.securityContext.runAsUser | int | `2000` | | +| controller.admissionWebhooks.patch.tolerations | list | `[]` | | +| controller.admissionWebhooks.patchWebhookJob.resources | object | `{}` | | +| controller.admissionWebhooks.port | int | `8443` | | +| controller.admissionWebhooks.service.annotations | object | `{}` | | +| controller.admissionWebhooks.service.externalIPs | list | `[]` | | +| controller.admissionWebhooks.service.loadBalancerSourceRanges | list | `[]` | | +| controller.admissionWebhooks.service.servicePort | int | `443` | | +| controller.admissionWebhooks.service.type | string | `"ClusterIP"` | | +| controller.affinity | object | `{}` | Affinity and anti-affinity rules for server scheduling to nodes # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity # | +| controller.allowSnippetAnnotations | bool | `true` | This configuration defines if Ingress Controller should allow users to set their own 
*-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected | +| controller.annotations | object | `{}` | Annotations to be added to the controller Deployment or DaemonSet # | +| controller.autoscaling.behavior | object | `{}` | | +| controller.autoscaling.enabled | bool | `false` | | +| controller.autoscaling.maxReplicas | int | `11` | | +| controller.autoscaling.minReplicas | int | `1` | | +| controller.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| controller.autoscaling.targetMemoryUtilizationPercentage | int | `50` | | +| controller.autoscalingTemplate | list | `[]` | | +| controller.config | object | `{}` | Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ | +| controller.configAnnotations | object | `{}` | Annotations to be added to the controller config configuration configmap. | +| controller.configMapNamespace | string | `""` | Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) | +| controller.containerName | string | `"controller"` | Configures the controller container name | +| controller.containerPort | object | `{"http":80,"https":443}` | Configures the ports that the nginx-controller listens on | +| controller.customTemplate.configMapKey | string | `""` | | +| controller.customTemplate.configMapName | string | `""` | | +| controller.dnsConfig | object | `{}` | Optionally customize the pod dnsConfig. | +| controller.dnsPolicy | string | `"ClusterFirst"` | Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. | +| controller.electionID | string | `"ingress-controller-leader"` | Election ID to use for status update | +| controller.enableMimalloc | bool | `true` | Enable mimalloc as a drop-in replacement for malloc. # ref: https://github.com/microsoft/mimalloc # | +| controller.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| controller.extraArgs | object | `{}` | Additional command line arguments to pass to nginx-ingress-controller E.g. to specify the default SSL certificate you can use | +| controller.extraContainers | list | `[]` | Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. | +| controller.extraEnvs | list | `[]` | Additional environment variables to set | +| controller.extraInitContainers | list | `[]` | Containers, which are run before the app containers are started. | +| controller.extraModules | list | `[]` | | +| controller.extraVolumeMounts | list | `[]` | Additional volumeMounts to the controller main container. | +| controller.extraVolumes | list | `[]` | Additional volumes to the controller pod. | +| controller.healthCheckHost | string | `""` | Address to bind the health check endpoint. It is better to set this option to the internal node address if the ingress nginx controller is running in the `hostNetwork: true` mode. | +| controller.healthCheckPath | string | `"/healthz"` | Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. 
| +| controller.hostNetwork | bool | `false` | Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged | +| controller.hostPort.enabled | bool | `false` | Enable 'hostPort' or not | +| controller.hostPort.ports.http | int | `80` | 'hostPort' http port | +| controller.hostPort.ports.https | int | `443` | 'hostPort' https port | +| controller.hostname | object | `{}` | Optionally customize the pod hostname. | +| controller.image.allowPrivilegeEscalation | bool | `true` | | +| controller.image.chroot | bool | `false` | | +| controller.image.digest | string | `"sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974"` | | +| controller.image.digestChroot | string | `"sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1"` | | +| controller.image.image | string | `"ingress-nginx/controller"` | | +| controller.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.image.registry | string | `"registry.k8s.io"` | | +| controller.image.runAsUser | int | `101` | | +| controller.image.tag | string | `"v1.3.1"` | | +| controller.ingressClass | string | `"nginx"` | For backwards compatibility with ingress.class annotation, use ingressClass. Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation | +| controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). | +| controller.ingressClassResource.controllerValue | string | `"k8s.io/ingress-nginx"` | Controller-value of the controller that is processing this ingressClass | +| controller.ingressClassResource.default | bool | `false` | Is this the default ingressClass for the cluster | +| controller.ingressClassResource.enabled | bool | `true` | Is this ingressClass enabled or not | +| controller.ingressClassResource.name | string | `"nginx"` | Name of the ingressClass | +| controller.ingressClassResource.parameters | object | `{}` | Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters. | +| controller.keda.apiVersion | string | `"keda.sh/v1alpha1"` | | +| controller.keda.behavior | object | `{}` | | +| controller.keda.cooldownPeriod | int | `300` | | +| controller.keda.enabled | bool | `false` | | +| controller.keda.maxReplicas | int | `11` | | +| controller.keda.minReplicas | int | `1` | | +| controller.keda.pollingInterval | int | `30` | | +| controller.keda.restoreToOriginalReplicaCount | bool | `false` | | +| controller.keda.scaledObject.annotations | object | `{}` | | +| controller.keda.triggers | list | `[]` | | +| controller.kind | string | `"Deployment"` | Use a `DaemonSet` or `Deployment` | +| controller.labels | object | `{}` | Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels # | +| controller.lifecycle | object | `{"preStop":{"exec":{"command":["/wait-shutdown"]}}}` | Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. 
To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s. # | +| controller.livenessProbe.failureThreshold | int | `5` | | +| controller.livenessProbe.httpGet.path | string | `"/healthz"` | | +| controller.livenessProbe.httpGet.port | int | `10254` | | +| controller.livenessProbe.httpGet.scheme | string | `"HTTP"` | | +| controller.livenessProbe.initialDelaySeconds | int | `10` | | +| controller.livenessProbe.periodSeconds | int | `10` | | +| controller.livenessProbe.successThreshold | int | `1` | | +| controller.livenessProbe.timeoutSeconds | int | `1` | | +| controller.maxmindLicenseKey | string | `""` | Maxmind license key to download GeoLite2 Databases. # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases | +| controller.metrics.enabled | bool | `false` | | +| controller.metrics.port | int | `10254` | | +| controller.metrics.prometheusRule.additionalLabels | object | `{}` | | +| controller.metrics.prometheusRule.enabled | bool | `false` | | +| controller.metrics.prometheusRule.rules | list | `[]` | | +| controller.metrics.service.annotations | object | `{}` | | +| controller.metrics.service.externalIPs | list | `[]` | List of IP addresses at which the stats-exporter service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| controller.metrics.service.loadBalancerSourceRanges | list | `[]` | | +| controller.metrics.service.servicePort | int | `10254` | | +| controller.metrics.service.type | string | `"ClusterIP"` | | +| controller.metrics.serviceMonitor.additionalLabels | object | `{}` | | +| controller.metrics.serviceMonitor.enabled | bool | `false` | | +| controller.metrics.serviceMonitor.metricRelabelings | list | `[]` | | +| controller.metrics.serviceMonitor.namespace | string | `""` | | +| controller.metrics.serviceMonitor.namespaceSelector | object | `{}` | | +| controller.metrics.serviceMonitor.relabelings | list | `[]` | | +| controller.metrics.serviceMonitor.scrapeInterval | string | `"30s"` | | +| controller.metrics.serviceMonitor.targetLabels | list | `[]` | | +| controller.minAvailable | int | `1` | | +| controller.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready # | +| controller.name | string | `"controller"` | | +| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # | +| controller.podAnnotations | object | `{}` | Annotations to be added to controller pods # | +| controller.podLabels | object | `{}` | Labels to add to the pod container metadata | +| controller.podSecurityContext | object | `{}` | Security Context policies for controller pods | +| controller.priorityClassName | string | `""` | | +| controller.proxySetHeaders | object | `{}` | Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers | +| controller.publishService | object | `{"enabled":true,"pathOverride":""}` | Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disable, the status field reports the IP address of the node or nodes where an ingress controller pod is running. 
| +| controller.publishService.enabled | bool | `true` | Enable 'publishService' or not | +| controller.publishService.pathOverride | string | `""` | Allows overriding of the publish service to bind to Must be / | +| controller.readinessProbe.failureThreshold | int | `3` | | +| controller.readinessProbe.httpGet.path | string | `"/healthz"` | | +| controller.readinessProbe.httpGet.port | int | `10254` | | +| controller.readinessProbe.httpGet.scheme | string | `"HTTP"` | | +| controller.readinessProbe.initialDelaySeconds | int | `10` | | +| controller.readinessProbe.periodSeconds | int | `10` | | +| controller.readinessProbe.successThreshold | int | `1` | | +| controller.readinessProbe.timeoutSeconds | int | `1` | | +| controller.replicaCount | int | `1` | | +| controller.reportNodeInternalIp | bool | `false` | Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply | +| controller.resources.requests.cpu | string | `"100m"` | | +| controller.resources.requests.memory | string | `"90Mi"` | | +| controller.scope.enabled | bool | `false` | Enable 'scope' or not | +| controller.scope.namespace | string | `""` | Namespace to limit the controller to; defaults to $(POD_NAMESPACE) | +| controller.scope.namespaceSelector | string | `""` | When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. | +| controller.service.annotations | object | `{}` | | +| controller.service.appProtocol | bool | `true` | If enabled is adding an appProtocol option for Kubernetes service. An appProtocol field replacing annotations that were using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http It allows choosing the protocol for each backend specified in the Kubernetes service. See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 Will be ignored for Kubernetes versions older than 1.20 # | +| controller.service.enableHttp | bool | `true` | | +| controller.service.enableHttps | bool | `true` | | +| controller.service.enabled | bool | `true` | | +| controller.service.external.enabled | bool | `true` | | +| controller.service.externalIPs | list | `[]` | List of IP addresses at which the controller services are available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| controller.service.internal.annotations | object | `{}` | Annotations are mandatory for the load balancer to come up. Varies with the cloud service. | +| controller.service.internal.enabled | bool | `false` | Enables an additional internal load balancer (besides the external one). | +| controller.service.internal.loadBalancerSourceRanges | list | `[]` | Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. | +| controller.service.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. 
# Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ | +| controller.service.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack-ness requested or required by this Service. Possible values are SingleStack, PreferDualStack or RequireDualStack. The ipFamilies and clusterIPs fields depend on the value of this field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ | +| controller.service.labels | object | `{}` | | +| controller.service.loadBalancerIP | string | `""` | Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer | +| controller.service.loadBalancerSourceRanges | list | `[]` | | +| controller.service.nodePorts.http | string | `""` | | +| controller.service.nodePorts.https | string | `""` | | +| controller.service.nodePorts.tcp | object | `{}` | | +| controller.service.nodePorts.udp | object | `{}` | | +| controller.service.ports.http | int | `80` | | +| controller.service.ports.https | int | `443` | | +| controller.service.targetPorts.http | string | `"http"` | | +| controller.service.targetPorts.https | string | `"https"` | | +| controller.service.type | string | `"LoadBalancer"` | | +| controller.shareProcessNamespace | bool | `false` | | +| controller.sysctls | object | `{}` | See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls | +| controller.tcp.annotations | object | `{}` | Annotations to be added to the tcp config configmap | +| controller.tcp.configMapNamespace | string | `""` | Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) | +| controller.terminationGracePeriodSeconds | int | `300` | `terminationGracePeriodSeconds` to avoid killing pods before we are ready # wait up to five minutes for the drain of connections # | +| controller.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # | +| controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. # Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ # | +| controller.udp.annotations | object | `{}` | Annotations to be added to the udp config configmap | +| controller.udp.configMapNamespace | string | `""` | Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) | +| controller.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet # | +| controller.watchIngressWithoutClass | bool | `false` | Process Ingress objects without ingressClass annotation/ingressClassName field Overrides value for --watch-ingress-without-class flag of the controller binary Defaults to false | +| defaultBackend.affinity | object | `{}` | | +| defaultBackend.autoscaling.annotations | object | `{}` | | +| defaultBackend.autoscaling.enabled | bool | `false` | | +| defaultBackend.autoscaling.maxReplicas | int | `2` | | +| defaultBackend.autoscaling.minReplicas | int | `1` | | +| defaultBackend.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| defaultBackend.autoscaling.targetMemoryUtilizationPercentage | int | `50` | | +| defaultBackend.containerSecurityContext | object | `{}` | Security Context policies for controller main container. 
See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # | +| defaultBackend.enabled | bool | `false` | | +| defaultBackend.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| defaultBackend.extraArgs | object | `{}` | | +| defaultBackend.extraEnvs | list | `[]` | Additional environment variables to set for defaultBackend pods | +| defaultBackend.extraVolumeMounts | list | `[]` | | +| defaultBackend.extraVolumes | list | `[]` | | +| defaultBackend.image.allowPrivilegeEscalation | bool | `false` | | +| defaultBackend.image.image | string | `"defaultbackend-amd64"` | | +| defaultBackend.image.pullPolicy | string | `"IfNotPresent"` | | +| defaultBackend.image.readOnlyRootFilesystem | bool | `true` | | +| defaultBackend.image.registry | string | `"registry.k8s.io"` | | +| defaultBackend.image.runAsNonRoot | bool | `true` | | +| defaultBackend.image.runAsUser | int | `65534` | | +| defaultBackend.image.tag | string | `"1.5"` | | +| defaultBackend.labels | object | `{}` | Labels to be added to the default backend resources | +| defaultBackend.livenessProbe.failureThreshold | int | `3` | | +| defaultBackend.livenessProbe.initialDelaySeconds | int | `30` | | +| defaultBackend.livenessProbe.periodSeconds | int | `10` | | +| defaultBackend.livenessProbe.successThreshold | int | `1` | | +| defaultBackend.livenessProbe.timeoutSeconds | int | `5` | | +| defaultBackend.minAvailable | int | `1` | | +| defaultBackend.name | string | `"defaultbackend"` | | +| defaultBackend.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for default backend pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # | +| defaultBackend.podAnnotations | object | `{}` | Annotations to be added to default backend pods # | +| defaultBackend.podLabels | object | `{}` | Labels to add to the pod container metadata | +| defaultBackend.podSecurityContext | object | `{}` | Security Context policies for controller pods See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # | +| defaultBackend.port | int | `8080` | | +| defaultBackend.priorityClassName | string | `""` | | +| defaultBackend.readinessProbe.failureThreshold | int | `6` | | +| defaultBackend.readinessProbe.initialDelaySeconds | int | `0` | | +| defaultBackend.readinessProbe.periodSeconds | int | `5` | | +| defaultBackend.readinessProbe.successThreshold | int | `1` | | +| defaultBackend.readinessProbe.timeoutSeconds | int | `5` | | +| defaultBackend.replicaCount | int | `1` | | +| defaultBackend.resources | object | `{}` | | +| defaultBackend.service.annotations | object | `{}` | | +| defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| defaultBackend.service.loadBalancerSourceRanges | list | `[]` | | +| defaultBackend.service.servicePort | int | `80` | | +| defaultBackend.service.type | string | `"ClusterIP"` | | +| defaultBackend.serviceAccount.automountServiceAccountToken | bool | `true` | | +| defaultBackend.serviceAccount.create | bool | `true` | | +| defaultBackend.serviceAccount.name | string | `""` | | +| defaultBackend.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # | +| dhParam | string | `nil` | A base64-encoded Diffie-Hellman 
parameter. This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` # Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param |
+| imagePullSecrets | list | `[]` | Optional array of imagePullSecrets containing private registry credentials # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ |
+| podSecurityPolicy.enabled | bool | `false` | |
+| portNamePrefix | string | `""` | Prefix for TCP and UDP port names in the ingress controller service # Some cloud providers, like Yandex Cloud, may have requirements for a port name regex to support cloud load balancer integration |
+| rbac.create | bool | `true` | |
+| rbac.scope | bool | `false` | |
+| revisionHistoryLimit | int | `10` | Rollback limit # |
+| serviceAccount.annotations | object | `{}` | Annotations for the controller service account |
+| serviceAccount.automountServiceAccountToken | bool | `true` | |
+| serviceAccount.create | bool | `true` | |
+| serviceAccount.name | string | `""` | |
+| tcp | object | `{}` | TCP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # |
+| udp | object | `{}` | UDP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # |
+
diff --git a/ansible/roles/helm_install/files/ingress-nginx/README.md.gotmpl b/ansible/roles/helm_install/files/ingress-nginx/README.md.gotmpl
new file mode 100644
index 0000000..8959961
--- /dev/null
+++ b/ansible/roles/helm_install/files/ingress-nginx/README.md.gotmpl
@@ -0,0 +1,235 @@
+{{ template "chart.header" . }}
+[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
+
+{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}
+
+To use, add the `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources.
+
+This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Prerequisites
+
+- Chart version 3.x.x: Kubernetes v1.16+
+- Chart version 4.x.x and above: Kubernetes v1.19+
+
+## Get Repo Info
+
+```console
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+helm repo update
+```
+
+## Install Chart
+
+**Important:** only Helm 3 is supported
+
+```console
+helm install [RELEASE_NAME] ingress-nginx/ingress-nginx
+```
+
+The command deploys ingress-nginx on the Kubernetes cluster in the default configuration.
+
+_See [configuration](#configuration) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+## Uninstall Chart
+
+```console
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+## Upgrading Chart
+
+```console
+helm upgrade [RELEASE_NAME] [CHART] --install
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+### Upgrading With Zero Downtime in Production
+
+By default, the ingress-nginx controller has service interruptions whenever its pods are restarted or redeployed.
To avoid that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8).
+
+### Migrating from stable/nginx-ingress
+
+There are two main ways to migrate a release from the `stable/nginx-ingress` chart to the `ingress-nginx/ingress-nginx` chart:
+
+1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one
+1. For critical services in production that require zero downtime, you will want to:
+    1. [Install](#install-chart) a second Ingress controller
+    1. Redirect your DNS traffic from the old controller to the new controller
+    1. Log traffic from both controllers during this changeover
+    1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it
+    1. For details on all of these steps, see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production)
+
+Note that some configurations differ, or have been upgraded, between the two charts; they are described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check the current differences by running the [configuration](#configuration) commands on both charts.
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run this configuration command:
+
+```console
+helm show values ingress-nginx/ingress-nginx
+```
+
+### PodDisruptionBudget
+
+Note that the PodDisruptionBudget resource is only created if `replicaCount` is greater than one;
+otherwise, it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info.
+
+### Prometheus Metrics
+
+The Nginx ingress controller can export Prometheus metrics by setting `controller.metrics.enabled` to `true`.
+
+You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`.
+Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation by setting `controller.metrics.serviceMonitor.enabled` to `true` and setting `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. The `release=prometheus` label should match the label configured on the Prometheus Operator's ServiceMonitor selector (see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`).
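+
+As a minimal sketch, a values override enabling both the metrics endpoint and a ServiceMonitor might look like the following (the `release: "prometheus"` label is an assumption that has to match your own Prometheus Operator's selector):
+
+```yaml
+controller:
+  metrics:
+    # Expose Prometheus metrics from the controller
+    enabled: true
+    serviceMonitor:
+      # Create a ServiceMonitor for the Prometheus Operator
+      enabled: true
+      additionalLabels:
+        release: "prometheus"
+```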
+
+### ingress-nginx nginx\_status page/stats server
+
+Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in the nginx ingress controller:
+
+- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed
+- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 became a unix socket webserver only available at localhost.
+  You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from the [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server.
+
+### ExternalDNS Service Configuration
+
+Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service:
+
+```yaml
+controller:
+  service:
+    annotations:
+      external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com.
+```
+
+### AWS L7 ELB with SSL Termination
+
+Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml):
+
+```yaml
+controller:
+  service:
+    targetPorts:
+      http: http
+      https: http
+    annotations:
+      service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX
+      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http"
+      service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
+      service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
+```
+
+### AWS route53-mapper
+
+To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label:
+
+```yaml
+controller:
+  service:
+    labels:
+      dns: "route53"
+    annotations:
+      domainName: "kubernetes-example.com"
+```
+
+### Additional Internal Load Balancer
+
+This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application.
+
+By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL.
+
+You'll need to set both of the following values:
+
+`controller.service.internal.enabled`
+`controller.service.internal.annotations`
+
+If one of them is missing, the internal load balancer will not be deployed. For example, if you set `controller.service.internal.enabled=true` but leave the annotations unset, no internal load balancer is created.
+
+`controller.service.internal.annotations` varies with the cloud service you're using.
+
+Example for AWS:
+
+```yaml
+controller:
+  service:
+    internal:
+      enabled: true
+      annotations:
+        # Create internal ELB
+        service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+        # Any other annotation can be declared here.
+```
+
+Example for GCE:
+
+```yaml
+controller:
+  service:
+    internal:
+      enabled: true
+      annotations:
+        # Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
+        # For GKE versions 1.17 and later
+        networking.gke.io/load-balancer-type: "Internal"
+        # For earlier versions
+        # cloud.google.com/load-balancer-type: "Internal"
+
+        # Any other annotation can be declared here.
+```
+
+Example for Azure:
+
+```yaml
+controller:
+  service:
+    internal:
+      enabled: true
+      annotations:
+        # Create internal LB
+        service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+        # Any other annotation can be declared here.
+```
+
+Example for Oracle Cloud Infrastructure:
+
+```yaml
+controller:
+  service:
+    internal:
+      enabled: true
+      annotations:
+        # Create internal LB
+        service.beta.kubernetes.io/oci-load-balancer-internal: "true"
+        # Any other annotation can be declared here.
+```
+
+A use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one Kubernetes ingress object.
+
+Optionally, you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`.
+
+### Ingress Admission Webhooks
+
+With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that integrates with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress resources from being added to the cluster.
+**This feature is enabled by default since 0.31.0.**
+
+nginx-ingress-controller 0.25.* works only with Kubernetes 1.14+; 0.26 fixed [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521).
+
+### Helm Error When Upgrading: spec.clusterIP: Invalid value: ""
+
+If you are upgrading this chart from a version between 0.31.0 and 1.2.2, then you may get an error like this:
+
+```console
+Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable
+```
+
+Details of how and why are in [this issue](https://github.com/helm/charts/pull/13646), but to resolve this you can set `xxxx.service.omitClusterIP` to `true`, where `xxxx` is the service referenced in the error.
+
+As of version `1.26.0` of this chart, simply not providing any clusterIP value means that `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur, since `clusterIP: ""` will not be rendered.
+
+{{ template "chart.requirementsSection" . }}
+
+{{ template "chart.valuesSection" . }}
+
+{{ template "helm-docs.versionFooter" .
}} diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml new file mode 100644 index 0000000..b28a232 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml @@ -0,0 +1,7 @@ +controller: + watchIngressWithoutClass: true + ingressClassResource: + name: custom-nginx + enabled: true + default: true + controllerValue: "k8s.io/custom-nginx" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-customconfig-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-customconfig-values.yaml new file mode 100644 index 0000000..4393a5b --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-customconfig-values.yaml @@ -0,0 +1,14 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + kind: DaemonSet + allowSnippetAnnotations: false + admissionWebhooks: + enabled: false + service: + type: ClusterIP + + config: + use-proxy-protocol: "true" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml new file mode 100644 index 0000000..1d94be2 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml @@ -0,0 +1,22 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-extra-modules.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-extra-modules.yaml new file mode 100644 index 0000000..f299dbf --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-extra-modules.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + service: + type: ClusterIP + extraModules: + - name: opentelemetry + image: busybox diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-headers-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-headers-values.yaml new file mode 100644 index 0000000..ab7d47b --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-headers-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml new file mode 100644 index 0000000..0a200a7 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + 
service.beta.kubernetes.io/aws-load-balancer-internal: "true" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-nodeport-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-nodeport-values.yaml new file mode 100644 index 0000000..3b7aa2f --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-nodeport-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-podannotations-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-podannotations-values.yaml new file mode 100644 index 0000000..0b55306 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-podannotations-values.yaml @@ -0,0 +1,17 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP + podAnnotations: + prometheus.io/path: /metrics + prometheus.io/port: "10254" + prometheus.io/scheme: http + prometheus.io/scrape: "true" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..acd86a7 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,20 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml new file mode 100644 index 0000000..90b0f57 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml @@ -0,0 +1,18 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" + +portNamePrefix: "port" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml new file mode 100644 index 0000000..25ee64d --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml @@ -0,0 +1,16 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-values.yaml new file mode 100644 index 0000000..380c8b4 --- /dev/null +++ 
b/ansible/roles/helm_install/files/ingress-nginx/ci/daemonset-tcp-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-default-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-default-values.yaml new file mode 100644 index 0000000..82fa23e --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-default-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-metrics-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-metrics-values.yaml new file mode 100644 index 0000000..cb3cb54 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-metrics-values.yaml @@ -0,0 +1,12 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-psp-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-psp-values.yaml new file mode 100644 index 0000000..8026a63 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-psp-values.yaml @@ -0,0 +1,13 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml new file mode 100644 index 0000000..fccdb13 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml @@ -0,0 +1,13 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-webhook-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-webhook-values.yaml new file mode 100644 index 0000000..54d364d --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deamonset-webhook-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml new file mode 100644 index 0000000..dca3f35 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml @@ -0,0 +1,14 @@ +controller: + autoscaling: + enabled: true + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: 
Pods + value: 1 + periodSeconds: 180 + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-autoscaling-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-autoscaling-values.yaml new file mode 100644 index 0000000..b8b3ac6 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-autoscaling-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + autoscaling: + enabled: true + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-customconfig-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-customconfig-values.yaml new file mode 100644 index 0000000..1749418 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-customconfig-values.yaml @@ -0,0 +1,12 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + config: + use-proxy-protocol: "true" + allowSnippetAnnotations: false + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-customnodeport-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-customnodeport-values.yaml new file mode 100644 index 0000000..a564eaf --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-customnodeport-values.yaml @@ -0,0 +1,20 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-default-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-default-values.yaml new file mode 100644 index 0000000..9f46b4e --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-default-values.yaml @@ -0,0 +1,8 @@ +# Left blank to test default values +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-extra-modules.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-extra-modules.yaml new file mode 100644 index 0000000..ec59235 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-extra-modules.yaml @@ -0,0 +1,10 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + extraModules: + - name: opentelemetry + image: busybox diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-headers-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-headers-values.yaml new file mode 100644 index 0000000..17a11ac --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-headers-values.yaml @@ -0,0 +1,13 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git 
a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-internal-lb-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-internal-lb-values.yaml new file mode 100644 index 0000000..fd8df8d --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-internal-lb-values.yaml @@ -0,0 +1,13 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "true" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-metrics-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-metrics-values.yaml new file mode 100644 index 0000000..9209ad5 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-metrics-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-nodeport-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-nodeport-values.yaml new file mode 100644 index 0000000..cd9b323 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-nodeport-values.yaml @@ -0,0 +1,9 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-podannotations-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-podannotations-values.yaml new file mode 100644 index 0000000..b48d93c --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-podannotations-values.yaml @@ -0,0 +1,16 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP + podAnnotations: + prometheus.io/path: /metrics + prometheus.io/port: "10254" + prometheus.io/scheme: http + prometheus.io/scrape: "true" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-psp-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-psp-values.yaml new file mode 100644 index 0000000..2f332a7 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-psp-values.yaml @@ -0,0 +1,10 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..c51a4e9 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,19 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git 
a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml new file mode 100644 index 0000000..56323c5 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml @@ -0,0 +1,17 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" + +portNamePrefix: "port" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml new file mode 100644 index 0000000..5b45b69 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml @@ -0,0 +1,15 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-values.yaml new file mode 100644 index 0000000..ac0b6e6 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-tcp-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml new file mode 100644 index 0000000..6195bb3 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml @@ -0,0 +1,12 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml new file mode 100644 index 0000000..95487b0 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml @@ -0,0 +1,12 @@ +controller: + service: + type: ClusterIP + admissionWebhooks: + enabled: true + extraEnvs: + - name: FOO + value: foo + - name: TEST + value: test + patch: + enabled: true diff --git a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml new file mode 100644 index 0000000..49ebbb0 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml @@ -0,0 +1,23 @@ +controller: + service: + type: ClusterIP + admissionWebhooks: + enabled: true + createSecretJob: + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + patchWebhookJob: + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + patch: + enabled: true diff --git 
a/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-values.yaml new file mode 100644 index 0000000..76669a5 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/ci/deployment-webhook-values.yaml @@ -0,0 +1,9 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/ansible/roles/helm_install/files/ingress-nginx/override-values.yaml b/ansible/roles/helm_install/files/ingress-nginx/override-values.yaml new file mode 100644 index 0000000..e190f03 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/override-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + + service: + type: LoadBalancer + nodePorts: + http: "30000" + https: "30001" + tcp: {} + udp: {} diff --git a/ansible/roles/helm_install/files/ingress-nginx/temp.yaml b/ansible/roles/helm_install/files/ingress-nginx/temp.yaml new file mode 100644 index 0000000..2b28787 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/temp.yaml @@ -0,0 +1,724 @@ +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +automountServiceAccountToken: true +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +data: + allow-snippet-annotations: "true" +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: 
ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller-admission + namespace: default +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + appProtocol: https + selector: + app.kubernetes.io/name: 
ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + type: LoadBalancer + ipFamilyPolicy: SingleStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + - name: https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + replicas: 1 + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: "registry.k8s.io/ingress-nginx/controller:v1.3.1@sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + 
resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: release-name-ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: release-name-ingress-nginx-admission +--- +# Source: ingress-nginx/templates/controller-ingressclass.yaml +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: release-name-ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: "default" + name: release-name-ingress-nginx-controller-admission + path: /networking/v1/ingresses +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 
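The temp.yaml file above looks like a helm template rendering of this chart with its default values, kept next to the chart for inspection. A minimal sketch of how such a dump could be regenerated from the role, assuming the helm binary is on PATH and the chart lives under files/ingress-nginx (task names and paths are illustrative, not taken from this role):

- name: Render ingress-nginx chart with default values
  ansible.builtin.command:
    cmd: helm template release-name ./files/ingress-nginx
  register: rendered_manifests
  changed_when: false  # rendering has no side effects on the cluster

- name: Save the rendered manifests next to the chart for review
  ansible.builtin.copy:
    content: "{{ rendered_manifests.stdout }}"
    dest: ./files/ingress-nginx/temp.yaml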
+kind: ClusterRoleBinding +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-create + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" 
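The admission-create Job above runs kube-webhook-certgen to mint a self-signed certificate and store it in the release-name-ingress-nginx-admission Secret, which the controller then mounts at /usr/local/certificates/. Roughly the Secret it produces; the key names are inferred from the --validating-webhook-certificate and --validating-webhook-key flags, so treat the exact shape as an assumption:

apiVersion: v1
kind: Secret
metadata:
  name: release-name-ingress-nginx-admission
  namespace: default
type: Opaque
data:
  ca: ""    # base64-encoded CA bundle, generated by the create Job
  cert: ""  # base64-encoded serving certificate for the admission Service
  key: ""   # base64-encoded private key matching cert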
+ imagePullPolicy: IfNotPresent + args: + - create + - --host=release-name-ingress-nginx-controller-admission,release-name-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=release-name-ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-patch + namespace: default + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-patch + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - patch + - --webhook-name=release-name-ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=release-name-ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 diff --git a/ansible/roles/helm_install/files/ingress-nginx/temp2.yaml b/ansible/roles/helm_install/files/ingress-nginx/temp2.yaml new file mode 100644 index 0000000..9ef52fc --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/temp2.yaml @@ -0,0 +1,725 @@ +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +automountServiceAccountToken: true +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: 
controller + name: release-name-ingress-nginx-controller + namespace: default +data: + allow-snippet-annotations: "true" +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + 
- coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller-admission + namespace: default +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + type: LoadBalancer + ipFamilyPolicy: SingleStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + nodePort: 30000 + - name: https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + nodePort: 30001 + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: "registry.k8s.io/ingress-nginx/controller:v1.3.1@sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: 
+ command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: release-name-ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: release-name-ingress-nginx-admission +--- +# Source: ingress-nginx/templates/controller-ingressclass.yaml +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: release-name-ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: "default" + name: release-name-ingress-nginx-controller-admission + path: /networking/v1/ingresses +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 
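temp2.yaml tracks temp.yaml but with override-values.yaml applied: the controller renders as a DaemonSet and the LoadBalancer Service pins nodePorts 30000 and 30001. A hedged sketch of installing the bundled chart with that override through the kubernetes.core.helm module; the module choice and the target namespace are assumptions, and the role may equally shell out to helm upgrade --install:

- name: Install ingress-nginx with the role's override values
  kubernetes.core.helm:
    name: ingress-nginx
    chart_ref: ./files/ingress-nginx  # local chart bundled with the role
    release_namespace: ingress-nginx  # assumed namespace, not from this patch
    create_namespace: true
    values_files:
      - ./files/ingress-nginx/override-values.yaml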
+kind: ServiceAccount +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + 
app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-create + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - create + - --host=release-name-ingress-nginx-controller-admission,release-name-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=release-name-ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-patch + namespace: default + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-patch + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - patch + - --webhook-name=release-name-ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=release-name-ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: 
POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/NOTES.txt b/ansible/roles/helm_install/files/ingress-nginx/templates/NOTES.txt new file mode 100644 index 0000000..8985c56 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/NOTES.txt @@ -0,0 +1,80 @@ +The ingress-nginx controller has been installed. + +{{- if contains "NodePort" .Values.controller.service.type }} +Get the application URL by running these commands: + +{{- if (not (empty .Values.controller.service.nodePorts.http)) }} + export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }} +{{- else }} + export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) +{{- end }} +{{- if (not (empty .Values.controller.service.nodePorts.https)) }} + export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }} +{{- else }} + export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) +{{- end }} + export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}") + + echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." + echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." +{{- else if contains "LoadBalancer" .Values.controller.service.type }} +It may take a few minutes for the LoadBalancer IP to be available. +You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ include "ingress-nginx.controller.fullname" . }}' +{{- else if contains "ClusterIP" .Values.controller.service.type }} +Get the application URL by running these commands: + export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "ingress-nginx.name" . }},component={{ .Values.controller.name }},release={{ .Release.Name }}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 + echo "Visit http://127.0.0.1:8080 to access your application." 
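The tcp:/udp: maps exercised by the ci values files earlier are wired into the controller via --tcp-services-configmap and --udp-services-configmap (see _params.tpl further below). As a sketch, tcp: {9000: "default/test:8080"} should render a ConfigMap roughly like the following; the -tcp name suffix follows _params.tpl, while the labels and exact quoting are assumptions since the ConfigMap template itself is not part of this patch:

apiVersion: v1
kind: ConfigMap
metadata:
  name: release-name-ingress-nginx-tcp  # <fullname>-tcp, matching _params.tpl
  namespace: default
data:
  "9000": default/test:8080  # exposed port -> namespace/service:servicePort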
+{{- end }} + +An example Ingress that makes use of the controller: + +{{- $isV1 := semverCompare ">=1" .Chart.AppVersion}} + apiVersion: networking.k8s.io/v1 + kind: Ingress + metadata: + name: example + namespace: foo + {{- if eq $isV1 false }} + annotations: + kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }} + {{- end }} + spec: + {{- if $isV1 }} + ingressClassName: {{ .Values.controller.ingressClassResource.name }} + {{- end }} + rules: + - host: www.example.com + http: + paths: + - pathType: Prefix + backend: + service: + name: exampleService + port: + number: 80 + path: / + # This section is only required if TLS is to be enabled for the Ingress + tls: + - hosts: + - www.example.com + secretName: example-tls + +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: + + apiVersion: v1 + kind: Secret + metadata: + name: example-tls + namespace: foo + data: + tls.crt: <base64 encoded cert> + tls.key: <base64 encoded key> + type: kubernetes.io/tls + +{{- if .Values.controller.headers }} +################################################################################# +###### WARNING: `controller.headers` has been deprecated! ##### +###### It has been renamed to `controller.proxySetHeaders`. ##### +################################################################################# +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/_helpers.tpl b/ansible/roles/helm_install/files/ingress-nginx/templates/_helpers.tpl new file mode 100644 index 0000000..e69de0c --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/_helpers.tpl @@ -0,0 +1,185 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "ingress-nginx.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ingress-nginx.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Container SecurityContext. 
+*/}} +{{- define "controller.containerSecurityContext" -}} +{{- if .Values.controller.containerSecurityContext -}} +{{- toYaml .Values.controller.containerSecurityContext -}} +{{- else -}} +capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + {{- if .Values.controller.image.chroot }} + - SYS_CHROOT + {{- end }} +runAsUser: {{ .Values.controller.image.runAsUser }} +allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} +{{- end }} +{{- end -}} + +{{/* +Get specific image +*/}} +{{- define "ingress-nginx.image" -}} +{{- if .chroot -}} +{{- printf "%s-chroot" .image -}} +{{- else -}} +{{- printf "%s" .image -}} +{{- end }} +{{- end -}} + +{{/* +Get specific image digest +*/}} +{{- define "ingress-nginx.imageDigest" -}} +{{- if .chroot -}} +{{- if .digestChroot -}} +{{- printf "@%s" .digestChroot -}} +{{- end }} +{{- else -}} +{{ if .digest -}} +{{- printf "@%s" .digest -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified controller name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.controller.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. + +By convention this will simply use the / to match the name of the +service generated. + +Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride` + +*/}} +{{- define "ingress-nginx.controller.publishServicePath" -}} +{{- $defServiceName := printf "%s/%s" "$(POD_NAMESPACE)" (include "ingress-nginx.controller.fullname" .) -}} +{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "ingress-nginx.labels" -}} +helm.sh/chart: {{ include "ingress-nginx.chart" . }} +{{ include "ingress-nginx.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/part-of: {{ template "ingress-nginx.name" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.commonLabels}} +{{ toYaml .Values.commonLabels }} +{{- end }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "ingress-nginx.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ingress-nginx.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the controller service account to use +*/}} +{{- define "ingress-nginx.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "ingress-nginx.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "ingress-nginx.defaultBackend.serviceAccountName" -}} +{{- if .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-backend" (include "ingress-nginx.fullname" .)) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Check the ingress controller version tag is at most three versions behind the last release +*/}} +{{- define "isControllerTagValid" -}} +{{- if not (semverCompare ">=0.27.0-0" .Values.controller.image.tag) -}} +{{- fail "Controller container image tag should be 0.27.0 or higher" -}} +{{- end -}} +{{- end -}} + +{{/* +IngressClass parameters. +*/}} +{{- define "ingressClass.parameters" -}} + {{- if .Values.controller.ingressClassResource.parameters -}} + parameters: +{{ toYaml .Values.controller.ingressClassResource.parameters | indent 4}} + {{ end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/_params.tpl b/ansible/roles/helm_install/files/ingress-nginx/templates/_params.tpl new file mode 100644 index 0000000..305ce0d --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/_params.tpl @@ -0,0 +1,62 @@ +{{- define "ingress-nginx.params" -}} +- /nginx-ingress-controller +{{- if .Values.defaultBackend.enabled }} +- --default-backend-service=$(POD_NAMESPACE)/{{ include "ingress-nginx.defaultBackend.fullname" . }} +{{- end }} +{{- if and .Values.controller.publishService.enabled .Values.controller.service.enabled }} +{{- if .Values.controller.service.external.enabled }} +- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }} +{{- else if .Values.controller.service.internal.enabled }} +- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }}-internal +{{- end }} +{{- end }} +- --election-id={{ .Values.controller.electionID }} +- --controller-class={{ .Values.controller.ingressClassResource.controllerValue }} +{{- if .Values.controller.ingressClass }} +- --ingress-class={{ .Values.controller.ingressClass }} +{{- end }} +- --configmap={{ default "$(POD_NAMESPACE)" .Values.controller.configMapNamespace }}/{{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.tcp }} +- --tcp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.tcp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-tcp +{{- end }} +{{- if .Values.udp }} +- --udp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.udp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . 
}}-udp +{{- end }} +{{- if .Values.controller.scope.enabled }} +- --watch-namespace={{ default "$(POD_NAMESPACE)" .Values.controller.scope.namespace }} +{{- end }} +{{- if and (not .Values.controller.scope.enabled) .Values.controller.scope.namespaceSelector }} +- --watch-namespace-selector={{ default "" .Values.controller.scope.namespaceSelector }} +{{- end }} +{{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }} +- --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} +- --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} +- --validating-webhook-certificate={{ .Values.controller.admissionWebhooks.certificate }} +- --validating-webhook-key={{ .Values.controller.admissionWebhooks.key }} +{{- end }} +{{- if .Values.controller.maxmindLicenseKey }} +- --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} +{{- end }} +{{- if .Values.controller.healthCheckHost }} +- --healthz-host={{ .Values.controller.healthCheckHost }} +{{- end }} +{{- if not (eq .Values.controller.healthCheckPath "/healthz") }} +- --health-check-path={{ .Values.controller.healthCheckPath }} +{{- end }} +{{- if .Values.controller.ingressClassByName }} +- --ingress-class-by-name=true +{{- end }} +{{- if .Values.controller.watchIngressWithoutClass }} +- --watch-ingress-without-class=true +{{- end }} +{{- range $key, $value := .Values.controller.extraArgs }} +{{- /* Accept keys without values or with false as value */}} +{{- if eq ($value | quote | len) 2 }} +- --{{ $key }} +{{- else }} +- --{{ $key }}={{ $value }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml new file mode 100644 index 0000000..5659a1f --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml @@ -0,0 +1,34 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + {{- with .Values.controller.admissionWebhooks.existingPsp }} + - {{ . }} + {{- else }} + - {{ include "ingress-nginx.fullname" . 
}}-admission + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml new file mode 100644 index 0000000..abf17fb --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml new file mode 100644 index 0000000..7558e0b --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.controller.admissionWebhooks.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: create + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - create + - --host={{ include "ingress-nginx.controller.fullname" . }}-admission,{{ include "ingress-nginx.controller.fullname" . }}-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name={{ include "ingress-nginx.fullname" . }}-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.admissionWebhooks.extraEnvs }} + {{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + {{- if .Values.controller.admissionWebhooks.createSecretJob.resources }} + resources: {{ toYaml .Values.controller.admissionWebhooks.createSecretJob.resources | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.securityContext }} + securityContext: + {{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml new file mode 100644 index 0000000..0528215 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -0,0 +1,81 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-patch + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.controller.admissionWebhooks.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . 
}}-admission-patch + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: patch + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - patch + - --webhook-name={{ include "ingress-nginx.fullname" . }}-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name={{ include "ingress-nginx.fullname" . }}-admission + - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.admissionWebhooks.extraEnvs }} + {{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + {{- if .Values.controller.admissionWebhooks.patchWebhookJob.resources }} + resources: {{ toYaml .Values.controller.admissionWebhooks.patchWebhookJob.resources | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.securityContext }} + securityContext: + {{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml new file mode 100644 index 0000000..70edde3 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled (empty .Values.controller.admissionWebhooks.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml new file mode 100644 index 0000000..795bac6 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml new file mode 100644 index 0000000..698c5c8 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml new file mode 100644 index 0000000..eae4751 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ingress-nginx.fullname" . 
}}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml new file mode 100644 index 0000000..8caffcb --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml @@ -0,0 +1,48 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + {{- if .Values.controller.admissionWebhooks.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.annotations | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: {{ .Values.controller.admissionWebhooks.failurePolicy | default "Fail" }} + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: {{ .Release.Namespace | quote }} + name: {{ include "ingress-nginx.controller.fullname" . }}-admission + path: /networking/v1/ingresses + {{- if .Values.controller.admissionWebhooks.timeoutSeconds }} + timeoutSeconds: {{ .Values.controller.admissionWebhooks.timeoutSeconds }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.controller.admissionWebhooks.namespaceSelector | nindent 6 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.objectSelector }} + objectSelector: {{ toYaml .Values.controller.admissionWebhooks.objectSelector | nindent 6 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/clusterrole.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/clusterrole.yaml new file mode 100644 index 0000000..0e725ec --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/clusterrole.yaml @@ -0,0 +1,94 @@ +{{- if .Values.rbac.create }} + +{{- if and .Values.rbac.scope (not .Values.controller.scope.enabled) -}} + {{ required "Invalid configuration: 'rbac.scope' should be equal to 'controller.scope.enabled' (true/false)." (index (dict) ".") }} +{{- end }} + +{{- if not .Values.rbac.scope -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . 
}} +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets +{{- if not .Values.controller.scope.enabled }} + - namespaces +{{- end}} + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch +{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }} + - apiGroups: + - "" + resources: + - namespaces + resourceNames: + - "{{ .Values.controller.scope.namespace }}" + verbs: + - get +{{- end }} + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +{{- end }} + +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/clusterrolebinding.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..acbbd8b --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.rbac.create (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-addheaders.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-addheaders.yaml new file mode 100644 index 0000000..dfd49a1 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-addheaders.yaml @@ -0,0 +1,14 @@ +{{- if .Values.controller.addHeaders -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-custom-add-headers + namespace: {{ .Release.Namespace }} +data: {{ toYaml .Values.controller.addHeaders | nindent 2 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml new file mode 100644 index 0000000..f8d15fa --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml @@ -0,0 +1,19 @@ +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-custom-proxy-headers + namespace: {{ .Release.Namespace }} +data: +{{- if .Values.controller.proxySetHeaders }} +{{ toYaml .Values.controller.proxySetHeaders | indent 2 }} +{{ else if and .Values.controller.headers (not .Values.controller.proxySetHeaders) }} +{{ toYaml .Values.controller.headers | indent 2 }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-tcp.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-tcp.yaml new file mode 100644 index 0000000..0f6088e --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-tcp.yaml @@ -0,0 +1,17 @@ +{{- if .Values.tcp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.tcp.annotations }} + annotations: {{ toYaml .Values.controller.tcp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-tcp + namespace: {{ .Release.Namespace }} +data: {{ tpl (toYaml .Values.tcp) . | nindent 2 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-udp.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-udp.yaml new file mode 100644 index 0000000..3772ec5 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap-udp.yaml @@ -0,0 +1,17 @@ +{{- if .Values.udp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.udp.annotations }} + annotations: {{ toYaml .Values.controller.udp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-udp + namespace: {{ .Release.Namespace }} +data: {{ tpl (toYaml .Values.udp) . | nindent 2 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap.yaml new file mode 100644 index 0000000..f28b26e --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-configmap.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.configAnnotations }} + annotations: {{ toYaml .Values.controller.configAnnotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +data: + allow-snippet-annotations: "{{ .Values.controller.allowSnippetAnnotations }}" +{{- if .Values.controller.addHeaders }} + add-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers +{{- end }} +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} + proxy-set-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . 
}}-custom-proxy-headers +{{- end }} +{{- if .Values.dhParam }} + ssl-dh-param: {{ printf "%s/%s" .Release.Namespace (include "ingress-nginx.controller.fullname" .) }} +{{- end }} +{{- range $key, $value := .Values.controller.config }} + {{- $key | nindent 2 }}: {{ $value | quote }} +{{- end }} + diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-daemonset.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-daemonset.yaml new file mode 100644 index 0000000..80c268f --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-daemonset.yaml @@ -0,0 +1,223 @@ +{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + updateStrategy: {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . 
}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . | nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: http-metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + + + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .Name }} + image: 
{{ .Image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + volumes: + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-deployment.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-deployment.yaml new file mode 100644 index 0000000..5ad1867 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-deployment.yaml @@ -0,0 +1,228 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + {{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + strategy: + {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName | quote }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . 
| nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: http-metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .name }} + image: {{ .image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + volumeMounts: + - name: modules + mountPath: /modules_mount + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: 
{{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + volumes: + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-hpa.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-hpa.yaml new file mode 100644 index 0000000..e0979f1 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-hpa.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.controller.autoscaling.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} +{{- if not .Values.controller.keda.enabled }} + +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + annotations: + {{- with .Values.controller.autoscaling.annotations }} + {{- toYaml . | trimSuffix "\n" | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "ingress-nginx.controller.fullname" . }} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: + {{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscalingTemplate }} + {{- toYaml . | nindent 2 }} + {{- end }} + {{- with .Values.controller.autoscaling.behavior }} + behavior: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} +{{- end }} + diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-ingressclass.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-ingressclass.yaml new file mode 100644 index 0000000..9492784 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-ingressclass.yaml @@ -0,0 +1,21 @@ +{{- if .Values.controller.ingressClassResource.enabled -}} +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ .Values.controller.ingressClassResource.name }} +{{- if .Values.controller.ingressClassResource.default }} + annotations: + ingressclass.kubernetes.io/is-default-class: "true" +{{- end }} +spec: + controller: {{ .Values.controller.ingressClassResource.controllerValue }} + {{ template "ingressClass.parameters" . }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-keda.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-keda.yaml new file mode 100644 index 0000000..875157e --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-keda.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.controller.keda.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} +# https://keda.sh/docs/ + +apiVersion: {{ .Values.controller.keda.apiVersion }} +kind: ScaledObject +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + {{- if .Values.controller.keda.scaledObject.annotations }} + annotations: {{ toYaml .Values.controller.keda.scaledObject.annotations | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: +{{- if eq .Values.controller.keda.apiVersion "keda.k8s.io/v1alpha1" }} + deploymentName: {{ include "ingress-nginx.controller.fullname" . }} +{{- else if eq .Values.controller.keda.apiVersion "keda.sh/v1alpha1" }} + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- end }} + pollingInterval: {{ .Values.controller.keda.pollingInterval }} + cooldownPeriod: {{ .Values.controller.keda.cooldownPeriod }} + minReplicaCount: {{ .Values.controller.keda.minReplicas }} + maxReplicaCount: {{ .Values.controller.keda.maxReplicas }} + triggers: +{{- with .Values.controller.keda.triggers }} +{{ toYaml . | indent 2 }} +{{ end }} + advanced: + restoreToOriginalReplicaCount: {{ .Values.controller.keda.restoreToOriginalReplicaCount }} +{{- if .Values.controller.keda.behavior }} + horizontalPodAutoscalerConfig: + behavior: +{{ with .Values.controller.keda.behavior -}} +{{ toYaml . 
| indent 8 }} +{{ end }} + +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml new file mode 100644 index 0000000..8dfbe98 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,19 @@ +{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (and (not .Values.controller.autoscaling.enabled) (gt (.Values.controller.replicaCount | int) 1)) }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + minAvailable: {{ .Values.controller.minAvailable }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-prometheusrules.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-prometheusrules.yaml new file mode 100644 index 0000000..78b5362 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-prometheusrules.yaml @@ -0,0 +1,21 @@ +{{- if and ( .Values.controller.metrics.enabled ) ( .Values.controller.metrics.prometheusRule.enabled ) ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) -}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.prometheusRule.namespace }} + namespace: {{ .Values.controller.metrics.prometheusRule.namespace | quote }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.prometheusRule.additionalLabels }} + {{- toYaml .Values.controller.metrics.prometheusRule.additionalLabels | nindent 4 }} + {{- end }} +spec: +{{- if .Values.controller.metrics.prometheusRule.rules }} + groups: + - name: {{ template "ingress-nginx.name" . }} + rules: {{- toYaml .Values.controller.metrics.prometheusRule.rules | nindent 4 }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-psp.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-psp.yaml new file mode 100644 index 0000000..2e0499c --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-psp.yaml @@ -0,0 +1,94 @@ +{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }} +{{- if and .Values.podSecurityPolicy.enabled (empty .Values.controller.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + allowedCapabilities: + - NET_BIND_SERVICE + {{- if .Values.controller.image.chroot }} + - SYS_CHROOT + {{- end }} +{{- if .Values.controller.sysctls }} + allowedUnsafeSysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - {{ $sysctl }} + {{- end }} +{{- end }} + privileged: false + allowPrivilegeEscalation: true + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + #- 'projected' + - 'secret' + #- 'downwardAPI' +{{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} +{{- end }} +{{- if or .Values.controller.hostNetwork .Values.controller.hostPort.enabled }} + hostPorts: +{{- if .Values.controller.hostNetwork }} +{{- range $key, $value := .Values.controller.containerPort }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- else if .Values.controller.hostPort.enabled }} +{{- range $key, $value := .Values.controller.hostPort.ports }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- end }} +{{- if .Values.controller.metrics.enabled }} + # metrics + - min: {{ .Values.controller.metrics.port }} + max: {{ .Values.controller.metrics.port }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + # admission webhooks + - min: {{ .Values.controller.admissionWebhooks.port }} + max: {{ .Values.controller.admissionWebhooks.port }} +{{- end }} +{{- range $key, $value := .Values.tcp }} + # {{ $key }}-tcp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- range $key, $value := .Values.udp }} + # {{ $key }}-udp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + seLinux: + rule: 'RunAsAny' +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-role.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-role.yaml new file mode 100644 index 0000000..330be8c --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-role.yaml @@ -0,0 +1,113 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . 
}} + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - {{ .Values.controller.electionID }} + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - {{ .Values.controller.electionID }} + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + {{- with .Values.controller.existingPsp }} + resourceNames: [{{ . }}] + {{- else }} + resourceNames: [{{ include "ingress-nginx.fullname" . }}] + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-rolebinding.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-rolebinding.yaml new file mode 100644 index 0000000..e846a11 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-service-internal.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-service-internal.yaml new file mode 100644 index 0000000..aae3e15 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-service-internal.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.internal.enabled .Values.controller.service.internal.annotations}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.internal.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }}-internal + namespace: {{ .Release.Namespace }} +spec: + type: "{{ .Values.controller.service.type }}" +{{- if .Values.controller.service.internal.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.internal.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.internal.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.internal.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.internal.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.internal.externalTrafficPolicy }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: http + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: https + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + {{- if $.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-service-metrics.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-service-metrics.yaml new file mode 100644 index 0000000..1c1d5bd --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-service-metrics.yaml @@ -0,0 +1,45 @@ +{{- if .Values.controller.metrics.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.metrics.service.annotations }} + annotations: {{ toYaml .Values.controller.metrics.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.service.labels }} + {{- toYaml .Values.controller.metrics.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }}-metrics + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.metrics.service.type }} +{{- if .Values.controller.metrics.service.clusterIP }} + clusterIP: {{ .Values.controller.metrics.service.clusterIP }} +{{- end }} +{{- if .Values.controller.metrics.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.metrics.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.metrics.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.metrics.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.metrics.service.externalTrafficPolicy }} +{{- end }} + ports: + - name: http-metrics + port: {{ .Values.controller.metrics.service.servicePort }} + protocol: TCP + targetPort: http-metrics + {{- $setNodePorts := (or (eq .Values.controller.metrics.service.type "NodePort") (eq .Values.controller.metrics.service.type "LoadBalancer")) }} + {{- if (and $setNodePorts (not (empty .Values.controller.metrics.service.nodePort))) }} + nodePort: {{ .Values.controller.metrics.service.nodePort }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-service-webhook.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-service-webhook.yaml new file mode 100644 index 0000000..2aae24f --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-service-webhook.yaml @@ -0,0 +1,40 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.admissionWebhooks.service.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-admission + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.admissionWebhooks.service.type }} +{{- if .Values.controller.admissionWebhooks.service.clusterIP }} + clusterIP: {{ .Values.controller.admissionWebhooks.service.clusterIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.admissionWebhooks.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.admissionWebhooks.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: https-webhook + port: 443 + targetPort: webhook + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: https + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-service.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-service.yaml new file mode 100644 index 0000000..2b28196 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-service.yaml @@ -0,0 +1,101 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.external.enabled -}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.service.type }} +{{- if .Values.controller.service.clusterIP }} + clusterIP: {{ .Values.controller.service.clusterIP }} +{{- end }} +{{- if .Values.controller.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }} +{{- end }} +{{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: {{ .Values.controller.service.sessionAffinity }} +{{- end }} +{{- if .Values.controller.service.healthCheckNodePort }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} +{{- end }} +{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}} +{{- if .Values.controller.service.ipFamilyPolicy }} + ipFamilyPolicy: {{ .Values.controller.service.ipFamilyPolicy }} +{{- end }} +{{- end }} +{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}} +{{- if .Values.controller.service.ipFamilies }} + ipFamilies: {{ toYaml .Values.controller.service.ipFamilies | nindent 4 }} +{{- end }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }} + appProtocol: http + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }} + appProtocol: https + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + {{- if $.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + 
{{- if index $.Values.controller.service.nodePorts.udp $key }}
+    nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }}
+    {{- end }}
+    {{- end }}
+  {{- end }}
+  selector:
+    {{- include "ingress-nginx.selectorLabels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+{{- end }}
diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-serviceaccount.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-serviceaccount.yaml
new file mode 100644
index 0000000..824b2a1
--- /dev/null
+++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-serviceaccount.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+    {{- with .Values.controller.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+  name: {{ template "ingress-nginx.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+  {{- if .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml .Values.serviceAccount.annotations | nindent 4 }}
+  {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-servicemonitor.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-servicemonitor.yaml
new file mode 100644
index 0000000..973d36b
--- /dev/null
+++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-servicemonitor.yaml
@@ -0,0 +1,48 @@
+{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled -}}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "ingress-nginx.controller.fullname" . }}
+{{- if .Values.controller.metrics.serviceMonitor.namespace }}
+  namespace: {{ .Values.controller.metrics.serviceMonitor.namespace | quote }}
+{{- end }}
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+    {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }}
+    {{- toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | nindent 4 }}
+    {{- end }}
+spec:
+  endpoints:
+    - port: http-metrics
+      interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }}
+      {{- if .Values.controller.metrics.serviceMonitor.honorLabels }}
+      honorLabels: true
+      {{- end }}
+      {{- if .Values.controller.metrics.serviceMonitor.relabelings }}
+      relabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.relabelings | nindent 8 }}
+      {{- end }}
+      {{- if .Values.controller.metrics.serviceMonitor.metricRelabelings }}
+      metricRelabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.metricRelabelings | nindent 8 }}
+      {{- end }}
+{{- if .Values.controller.metrics.serviceMonitor.jobLabel }}
+  jobLabel: {{ .Values.controller.metrics.serviceMonitor.jobLabel | quote }}
+{{- end }}
+{{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }}
+  namespaceSelector: {{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | nindent 4 }}
+{{- else }}
+  namespaceSelector:
+    matchNames:
+      - {{ .Release.Namespace }}
+{{- end }}
+{{- if .Values.controller.metrics.serviceMonitor.targetLabels }}
+  targetLabels:
+  {{- range .Values.controller.metrics.serviceMonitor.targetLabels }}
+    - {{ .
}} + {{- end }} +{{- end }} + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml new file mode 100644 index 0000000..f74c2fb --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml @@ -0,0 +1,19 @@ +{{- if .Values.controller.admissionWebhooks.enabled }} +{{- if .Values.controller.admissionWebhooks.networkPolicyEnabled }} + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-webhooks-allow + namespace: {{ .Release.Namespace }} +spec: + ingress: + - {} + podSelector: + matchLabels: + app.kubernetes.io/name: {{ include "ingress-nginx.name" . }} + policyTypes: + - Ingress + +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-deployment.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-deployment.yaml new file mode 100644 index 0000000..fd3e96e --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-deployment.yaml @@ -0,0 +1,118 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: default-backend +{{- if not .Values.defaultBackend.autoscaling.enabled }} + replicas: {{ .Values.defaultBackend.replicaCount }} +{{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + {{- if .Values.defaultBackend.podAnnotations }} + annotations: {{ toYaml .Values.defaultBackend.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.podLabels }} + {{- toYaml .Values.defaultBackend.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.priorityClassName }} + priorityClassName: {{ .Values.defaultBackend.priorityClassName }} + {{- end }} + {{- if .Values.defaultBackend.podSecurityContext }} + securityContext: {{ toYaml .Values.defaultBackend.podSecurityContext | nindent 8 }} + {{- end }} + containers: + - name: {{ template "ingress-nginx.name" . 
}}-default-backend + {{- with .Values.defaultBackend.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }} + {{- if .Values.defaultBackend.extraArgs }} + args: + {{- range $key, $value := .Values.defaultBackend.extraArgs }} + {{- /* Accept keys without values or with false as value */}} + {{- if eq ($value | quote | len) 2 }} + - --{{ $key }} + {{- else }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + {{- end }} + securityContext: + capabilities: + drop: + - ALL + runAsUser: {{ .Values.defaultBackend.image.runAsUser }} + runAsNonRoot: {{ .Values.defaultBackend.image.runAsNonRoot }} + allowPrivilegeEscalation: {{ .Values.defaultBackend.image.allowPrivilegeEscalation }} + readOnlyRootFilesystem: {{ .Values.defaultBackend.image.readOnlyRootFilesystem}} + {{- if .Values.defaultBackend.extraEnvs }} + env: {{ toYaml .Values.defaultBackend.extraEnvs | nindent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.port }} + protocol: TCP + {{- if .Values.defaultBackend.extraVolumeMounts }} + volumeMounts: {{- toYaml .Values.defaultBackend.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.defaultBackend.resources }} + resources: {{ toYaml .Values.defaultBackend.resources | nindent 12 }} + {{- end }} + {{- if .Values.defaultBackend.nodeSelector }} + nodeSelector: {{ toYaml .Values.defaultBackend.nodeSelector | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . 
}} + {{- if .Values.defaultBackend.tolerations }} + tolerations: {{ toYaml .Values.defaultBackend.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.affinity }} + affinity: {{ toYaml .Values.defaultBackend.affinity | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: 60 + {{- if .Values.defaultBackend.extraVolumes }} + volumes: {{ toYaml .Values.defaultBackend.extraVolumes | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-hpa.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-hpa.yaml new file mode 100644 index 0000000..594d265 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-hpa.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "ingress-nginx.defaultBackend.fullname" . }} + minReplicas: {{ .Values.defaultBackend.autoscaling.minReplicas }} + maxReplicas: {{ .Values.defaultBackend.autoscaling.maxReplicas }} + metrics: +{{- with .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ . }} +{{- end }} +{{- with .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ . }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml new file mode 100644 index 0000000..00891ce --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml @@ -0,0 +1,21 @@ +{{- if .Values.defaultBackend.enabled -}} +{{- if or (gt (.Values.defaultBackend.replicaCount | int) 1) (gt (.Values.defaultBackend.autoscaling.minReplicas | int) 1) }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: default-backend + minAvailable: {{ .Values.defaultBackend.minAvailable }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-psp.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-psp.yaml new file mode 100644 index 0000000..c144c8f --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-psp.yaml @@ -0,0 +1,38 @@ +{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }} +{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled (empty .Values.defaultBackend.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-backend + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-role.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-role.yaml new file mode 100644 index 0000000..a2b457c --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-role.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-backend + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + {{- with .Values.defaultBackend.existingPsp }} + resourceNames: [{{ . }}] + {{- else }} + resourceNames: [{{ include "ingress-nginx.fullname" . }}-backend] + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-rolebinding.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-rolebinding.yaml new file mode 100644 index 0000000..dbaa516 --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-backend + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-backend +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-service.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-service.yaml new file mode 100644 index 0000000..5f1d09a --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-service.yaml @@ -0,0 +1,41 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.defaultBackend.service.annotations }} + annotations: {{ toYaml .Values.defaultBackend.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.defaultBackend.service.type }} +{{- if .Values.defaultBackend.service.clusterIP }} + clusterIP: {{ .Values.defaultBackend.service.clusterIP }} +{{- end }} +{{- if .Values.defaultBackend.service.externalIPs }} + externalIPs: {{ toYaml .Values.defaultBackend.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.defaultBackend.service.loadBalancerIP }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: http + port: {{ .Values.defaultBackend.service.servicePort }} + protocol: TCP + targetPort: http + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: http + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: default-backend +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-serviceaccount.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-serviceaccount.yaml new file mode 100644 index 0000000..b45a95a --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +automountServiceAccountToken: {{ .Values.defaultBackend.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/templates/dh-param-secret.yaml b/ansible/roles/helm_install/files/ingress-nginx/templates/dh-param-secret.yaml new file mode 100644 index 0000000..12e7a4f --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/templates/dh-param-secret.yaml @@ -0,0 +1,10 @@ +{{- with .Values.dhParam -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "ingress-nginx.controller.fullname" $ }} + labels: + {{- include "ingress-nginx.labels" $ | nindent 4 }} +data: + dhparam.pem: {{ . 
}} +{{- end }} diff --git a/ansible/roles/helm_install/files/ingress-nginx/values.yaml b/ansible/roles/helm_install/files/ingress-nginx/values.yaml new file mode 100644 index 0000000..9ec174f --- /dev/null +++ b/ansible/roles/helm_install/files/ingress-nginx/values.yaml @@ -0,0 +1,944 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md +## + +## Overrides for generated resource names +# See templates/_helpers.tpl +# nameOverride: +# fullnameOverride: + +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +controller: + name: controller + image: + ## Keep false as default for now! + chroot: false + registry: registry.k8s.io + image: ingress-nginx/controller + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "v1.3.1" + digest: sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974 + digestChroot: sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1 + pullPolicy: IfNotPresent + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: true + + # -- Use an existing PSP instead of creating one + existingPsp: "" + + # -- Configures the controller container name + containerName: controller + + # -- Configures the ports that the nginx-controller listens on + containerPort: + http: 80 + https: 443 + + # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + + # -- Annotations to be added to the controller config configuration configmap. + configAnnotations: {} + + # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers + proxySetHeaders: {} + + # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + + # -- Optionally customize the pod dnsConfig. + dnsConfig: {} + + # -- Optionally customize the pod hostname. + hostname: {} + + # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. + dnsPolicy: ClusterFirst + + # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + # -- Process Ingress objects without ingressClass annotation/ingressClassName field + # Overrides value for --watch-ingress-without-class flag of the controller binary + # Defaults to false + watchIngressWithoutClass: false + + # -- Process IngressClass per name (additionally as per spec.controller). 
+  ingressClassByName: false
+
+  # -- This configuration defines if Ingress Controller should allow users to set
+  # their own *-snippet annotations, otherwise this is forbidden / dropped
+  # when users add those annotations.
+  # Global snippets in ConfigMap are still respected
+  allowSnippetAnnotations: true
+
+  # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
+  # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
+  # is merged
+  hostNetwork: false
+
+  ## Use host ports 80 and 443
+  ## Disabled by default
+  hostPort:
+    # -- Enable 'hostPort' or not
+    enabled: false
+    ports:
+      # -- 'hostPort' http port
+      http: 80
+      # -- 'hostPort' https port
+      https: 443
+
+  # -- Election ID to use for status update
+  electionID: ingress-controller-leader
+
+  ## This section refers to the creation of the IngressClass resource
+  ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19
+  ingressClassResource:
+    # -- Name of the ingressClass
+    name: nginx
+    # -- Is this ingressClass enabled or not
+    enabled: true
+    # -- Is this the default ingressClass for the cluster
+    default: false
+    # -- Controller-value of the controller that is processing this ingressClass
+    controllerValue: "k8s.io/ingress-nginx"
+
+    # -- Parameters is a link to a custom resource containing additional
+    # configuration for the controller. This is optional if the controller
+    # does not require extra parameters.
+    parameters: {}
+
+  # -- For backwards compatibility with ingress.class annotation, use ingressClass.
+  # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation
+  ingressClass: nginx
+
+  # -- Labels to add to the pod container metadata
+  podLabels: {}
+  #  key: value
+
+  # -- Security Context policies for controller pods
+  podSecurityContext: {}
+
+  # -- See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls
+  sysctls: {}
+  # sysctls:
+  #   "net.core.somaxconn": "8192"
+
+  # -- Allows customization of the source of the IP address or FQDN to report
+  # in the ingress status field. By default, it reads the information provided
+  # by the service. If disabled, the status field reports the IP address of the
+  # node or nodes where an ingress controller pod is running.
+  publishService:
+    # -- Enable 'publishService' or not
+    enabled: true
+    # -- Allows overriding of the publish service to bind to
+    # Must be <namespace>/<service_name>
+    pathOverride: ""
+
+  # Limit the scope of the controller to a specific namespace
+  scope:
+    # -- Enable 'scope' or not
+    enabled: false
+    # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE)
+    namespace: ""
+    # -- When scope.enabled == false, instead of watching all namespaces, watch only the namespaces whose labels
+    # match the namespaceSelector. Format like foo=bar. Defaults to empty, meaning all namespaces are watched.
+  namespaceSelector: ""
+
+  # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE)
+  configMapNamespace: ""
+
+  tcp:
+    # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE)
+    configMapNamespace: ""
+    # -- Annotations to be added to the tcp config configmap
+    annotations: {}
+
+  udp:
+    # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE)
+    configMapNamespace: ""
+    # -- Annotations to be added to the udp config configmap
+    annotations: {}
+
+  # -- Maxmind license key to download GeoLite2 Databases.
+  ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases
+  maxmindLicenseKey: ""
+
+  # -- Additional command line arguments to pass to nginx-ingress-controller
+  # E.g. to specify the default SSL certificate you can use
+  extraArgs: {}
+  ## extraArgs:
+  ##   default-ssl-certificate: "<namespace>/<secret_name>"
+
+  # -- Additional environment variables to set
+  extraEnvs: []
+  # extraEnvs:
+  #   - name: FOO
+  #     valueFrom:
+  #       secretKeyRef:
+  #         key: FOO
+  #         name: secret-resource
+
+  # -- Use a `DaemonSet` or `Deployment`
+  kind: Deployment
+
+  # -- Annotations to be added to the controller Deployment or DaemonSet
+  ##
+  annotations: {}
+  #  keel.sh/pollSchedule: "@every 60m"
+
+  # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels
+  ##
+  labels: {}
+  #  keel.sh/policy: patch
+  #  keel.sh/trigger: poll
+
+
+  # -- The update strategy to apply to the Deployment or DaemonSet
+  ##
+  updateStrategy: {}
+  #  rollingUpdate:
+  #    maxUnavailable: 1
+  #  type: RollingUpdate
+
+  # -- `minReadySeconds` to avoid killing pods before we are ready
+  ##
+  minReadySeconds: 0
+
+
+  # -- Node tolerations for server scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+  #  - key: "key"
+  #    operator: "Equal|Exists"
+  #    value: "value"
+  #    effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  # -- Affinity and anti-affinity rules for server scheduling to nodes
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  # # An example of preferred pod anti-affinity, weight is in the range 1-100
+  # podAntiAffinity:
+  #   preferredDuringSchedulingIgnoredDuringExecution:
+  #   - weight: 100
+  #     podAffinityTerm:
+  #       labelSelector:
+  #         matchExpressions:
+  #         - key: app.kubernetes.io/name
+  #           operator: In
+  #           values:
+  #           - ingress-nginx
+  #         - key: app.kubernetes.io/instance
+  #           operator: In
+  #           values:
+  #           - ingress-nginx
+  #         - key: app.kubernetes.io/component
+  #           operator: In
+  #           values:
+  #           - controller
+  #       topologyKey: kubernetes.io/hostname
+
+  # # An example of required pod anti-affinity
+  # podAntiAffinity:
+  #   requiredDuringSchedulingIgnoredDuringExecution:
+  #   - labelSelector:
+  #       matchExpressions:
+  #       - key: app.kubernetes.io/name
+  #         operator: In
+  #         values:
+  #         - ingress-nginx
+  #       - key: app.kubernetes.io/instance
+  #         operator: In
+  #         values:
+  #         - ingress-nginx
+  #       - key: app.kubernetes.io/component
+  #         operator: In
+  #         values:
+  #         - controller
+  #     topologyKey: "kubernetes.io/hostname"
+
+  # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: ingress-nginx-internal + + # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready + ## wait up to five minutes for the drain of connections + ## + terminationGracePeriodSeconds: 300 + + # -- Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + ## startupProbe: + ## httpGet: + ## # should match container.healthCheckPath + ## path: "/healthz" + ## port: 10254 + ## scheme: HTTP + ## initialDelaySeconds: 5 + ## periodSeconds: 5 + ## timeoutSeconds: 2 + ## successThreshold: 1 + ## failureThreshold: 5 + livenessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + + # -- Path of the health check endpoint. All requests received on the port defined by + # the healthz-port parameter are forwarded internally to this path. + healthCheckPath: "/healthz" + + # -- Address to bind the health check endpoint. + # It is better to set this option to the internal node address + # if the ingress nginx controller is running in the `hostNetwork: true` mode. + healthCheckHost: "" + + # -- Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes + ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 + ## Ideally, there should be no limits. 
## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
+  resources:
+  ##  limits:
+  ##    cpu: 100m
+  ##    memory: 90Mi
+    requests:
+      cpu: 100m
+      memory: 90Mi
+
+  # Mutually exclusive with keda autoscaling
+  autoscaling:
+    enabled: false
+    minReplicas: 1
+    maxReplicas: 11
+    targetCPUUtilizationPercentage: 50
+    targetMemoryUtilizationPercentage: 50
+    behavior: {}
+    # scaleDown:
+    #   stabilizationWindowSeconds: 300
+    #   policies:
+    #   - type: Pods
+    #     value: 1
+    #     periodSeconds: 180
+    # scaleUp:
+    #   stabilizationWindowSeconds: 300
+    #   policies:
+    #   - type: Pods
+    #     value: 2
+    #     periodSeconds: 60
+
+  autoscalingTemplate: []
+  # Custom or additional autoscaling metrics
+  # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
+  # - type: Pods
+  #   pods:
+  #     metric:
+  #       name: nginx_ingress_controller_nginx_process_requests_total
+  #     target:
+  #       type: AverageValue
+  #       averageValue: 10000m
+
+  # Mutually exclusive with hpa autoscaling
+  keda:
+    apiVersion: "keda.sh/v1alpha1"
+    ## apiVersion changes with keda 1.x vs 2.x
+    ## 2.x = keda.sh/v1alpha1
+    ## 1.x = keda.k8s.io/v1alpha1
+    enabled: false
+    minReplicas: 1
+    maxReplicas: 11
+    pollingInterval: 30
+    cooldownPeriod: 300
+    restoreToOriginalReplicaCount: false
+    scaledObject:
+      annotations: {}
+      # Custom annotations for ScaledObject resource
+      #  annotations:
+      #    key: value
+    triggers: []
+    # - type: prometheus
+    #   metadata:
+    #     serverAddress: http://<prometheus-host>:9090
+    #     metricName: http_requests_total
+    #     threshold: '100'
+    #     query: sum(rate(http_requests_total{deployment="my-deployment"}[2m]))
+
+    behavior: {}
+    # scaleDown:
+    #   stabilizationWindowSeconds: 300
+    #   policies:
+    #   - type: Pods
+    #     value: 1
+    #     periodSeconds: 180
+    # scaleUp:
+    #   stabilizationWindowSeconds: 300
+    #   policies:
+    #   - type: Pods
+    #     value: 2
+    #     periodSeconds: 60
+
+  # -- Enable mimalloc as a drop-in replacement for malloc.
+  ## ref: https://github.com/microsoft/mimalloc
+  ##
+  enableMimalloc: true
+
+  ## Override NGINX template
+  customTemplate:
+    configMapName: ""
+    configMapKey: ""
+
+  service:
+    enabled: true
+
+    # -- If enabled, adds an appProtocol option to the Kubernetes service. The appProtocol field replaces the annotations
+    # that were used for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
+    # It allows choosing the protocol for each backend specified in the Kubernetes service.
+    # See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244
+    # Will be ignored for Kubernetes versions older than 1.20
+    ##
+    appProtocol: true
+
+    annotations: {}
+    labels: {}
+    # clusterIP: ""
+
+    # -- List of IP addresses at which the controller services are available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    # -- Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+
+    enableHttp: true
+    enableHttps: true
+
+    ## Set external traffic policy to: "Local" to preserve source IP on providers supporting it.
+    ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
+    # externalTrafficPolicy: ""
+
+    ## Must be either "None" or "ClientIP" if set. Kubernetes will default to "None".
+ ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + # sessionAffinity: "" + + ## Specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, + ## the service controller allocates a port from your cluster’s NodePort range. + ## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Represents the dual-stack-ness requested or required by this Service. Possible values are + # SingleStack, PreferDualStack or RequireDualStack. + # The ipFamilies and clusterIPs fields depend on the value of this field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilyPolicy: "SingleStack" + + # -- List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically + # based on cluster configuration and the ipFamilyPolicy field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilies: + - IPv4 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + + ## type: NodePort + ## nodePorts: + ## http: 32080 + ## https: 32443 + ## tcp: + ## 8080: 32808 + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + + external: + enabled: true + + internal: + # -- Enables an additional internal load balancer (besides the external one). + enabled: false + # -- Annotations are mandatory for the load balancer to come up. Varies with the cloud service. + annotations: {} + + # loadBalancerIP: "" + + # -- Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. + loadBalancerSourceRanges: [] + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + # shareProcessNamespace enables process namespace sharing within the pod. + # This can be used for example to signal log rotation using `kill -USR1` from a sidecar. + shareProcessNamespace: false + + # -- Additional containers to be added to the controller pod. + # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Additional volumeMounts to the controller main container. + extraVolumeMounts: [] + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + # -- Additional volumes to the controller pod. + extraVolumes: [] + # - name: copy-portal-skins + # emptyDir: {} + + # -- Containers, which are run before the app containers are started. 
+ extraInitContainers: [] + # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + extraModules: [] + ## Modules, which are mounted into the core nginx image + # - name: opentelemetry + # image: registry.k8s.io/ingress-nginx/opentelemetry:v20220801-g00ee51f09@sha256:482562feba02ad178411efc284f8eb803a185e3ea5588b6111ccbc20b816b427 + # + # The image must contain a `/usr/local/bin/init_module.sh` executable, which + # will be executed as initContainers, to move its config files within the + # mounted volume. + + admissionWebhooks: + annotations: {} + # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem". + + ## Additional annotations to the admission webhooks. + ## These annotations will be added to the ValidatingWebhookConfiguration and + ## the Jobs Spec of the admission webhooks. + enabled: true + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + # -- Admission Webhook failure policy to use + failurePolicy: Fail + # timeoutSeconds: 10 + port: 8443 + certificate: "/usr/local/certificates/cert" + key: "/usr/local/certificates/key" + namespaceSelector: {} + objectSelector: {} + # -- Labels to be added to admission webhooks + labels: {} + + # -- Use an existing PSP instead of creating one + existingPsp: "" + networkPolicyEnabled: false + + service: + annotations: {} + # clusterIP: "" + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + createSecretJob: + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + patchWebhookJob: + resources: {} + + patch: + enabled: true + image: + registry: registry.k8s.io + image: ingress-nginx/kube-webhook-certgen + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: v1.3.0 + digest: sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47 + pullPolicy: IfNotPresent + # -- Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + # -- Labels to be added to patch job resources + labels: {} + securityContext: + runAsNonRoot: true + runAsUser: 2000 + fsGroup: 2000 + + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + # clusterIP: "" + + # -- List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 10254 + type: ClusterIP + # externalTrafficPolicy: "" + # nodePort: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + ## The label to use to retrieve the job name from. 
## jobLabel: "app.kubernetes.io/name"
+      namespace: ""
+      namespaceSelector: {}
+      ## Default: scrape .Release.Namespace only
+      ## To scrape all, use the following:
+      ## namespaceSelector:
+      ##   any: true
+      scrapeInterval: 30s
+      # honorLabels: true
+      targetLabels: []
+      relabelings: []
+      metricRelabelings: []
+
+    prometheusRule:
+      enabled: false
+      additionalLabels: {}
+      # namespace: ""
+      rules: []
+      # # These are just example rules, please adapt them to your needs
+      # - alert: NGINXConfigFailed
+      #   expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
+      #   for: 1s
+      #   labels:
+      #     severity: critical
+      #   annotations:
+      #     description: bad ingress config - nginx config test failed
+      #     summary: uninstall the latest ingress changes to allow config reloads to resume
+      # - alert: NGINXCertificateExpiry
+      #   expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
+      #   for: 1s
+      #   labels:
+      #     severity: critical
+      #   annotations:
+      #     description: ssl certificate(s) will expire in less than a week
+      #     summary: renew expiring certificates to avoid downtime
+      # - alert: NGINXTooMany500s
+      #   expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
+      #   for: 1m
+      #   labels:
+      #     severity: warning
+      #   annotations:
+      #     description: Too many 5XXs
+      #     summary: More than 5% of all requests returned 5XX, this requires your attention
+      # - alert: NGINXTooMany400s
+      #   expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
+      #   for: 1m
+      #   labels:
+      #     severity: warning
+      #   annotations:
+      #     description: Too many 4XXs
+      #     summary: More than 5% of all requests returned 4XX, this requires your attention
+
+  # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook:
+  # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds
+  # to 300, allowing the draining of connections up to five minutes.
+  # If the active connections end before that, the pod will terminate gracefully at that time.
+  # To take full advantage of this feature, the ConfigMap option
+  # worker-shutdown-timeout is now set to 240s by default instead of 10s.
+ ## + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + + priorityClassName: "" + +# -- Rollback limit +## +revisionHistoryLimit: 10 + +## Default 404 backend +## +defaultBackend: + ## + enabled: false + + name: defaultbackend + image: + registry: registry.k8s.io + image: defaultbackend-amd64 + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "1.5" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + + # -- Use an existing PSP instead of creating one + existingPsp: "" + + extraArgs: {} + + serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + # -- Security Context policies for controller pods + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + # notes on enabling and using sysctls + ## + podSecurityContext: {} + + # -- Security Context policies for controller main container. + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + # notes on enabling and using sysctls + ## + containerSecurityContext: {} + + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + # -- Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + extraVolumeMounts: [] + ## Additional volumeMounts to the default backend container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the default backend pod. 
+ # - name: copy-portal-skins + # emptyDir: {} + + autoscaling: + annotations: {} + enabled: false + minReplicas: 1 + maxReplicas: 2 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + service: + annotations: {} + + # clusterIP: "" + + # -- List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + # -- Labels to be added to the default backend resources + labels: {} + +## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266 +rbac: + create: true + scope: false + +## If true, create & use Pod Security Policy resources +## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Annotations for the controller service account + annotations: {} + +# -- Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# -- TCP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# -- UDP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +udp: {} +# 53: "kube-system/kube-dns:53" + +# -- Prefix for TCP and UDP ports names in ingress controller service +## Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration +portNamePrefix: "" + +# -- (string) A base64-encoded Diffie-Hellman parameter. +# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` +## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param +dhParam: diff --git a/ansible/roles/helm_install/files/kafka/.helmignore b/ansible/roles/helm_install/files/kafka/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/ansible/roles/helm_install/files/kafka/Chart.yaml b/ansible/roles/helm_install/files/kafka/Chart.yaml
new file mode 100644
index 0000000..1d1c065
--- /dev/null
+++ b/ansible/roles/helm_install/files/kafka/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: kafkaset
+version: 0.1.0
diff --git a/ansible/roles/helm_install/files/kafka/README.txt b/ansible/roles/helm_install/files/kafka/README.txt
new file mode 100644
index 0000000..eeba075
--- /dev/null
+++ b/ansible/roles/helm_install/files/kafka/README.txt
@@ -0,0 +1,3 @@
+
+To change the ports, values.yaml and the
+broker-config yaml must be updated at the same time
diff --git a/ansible/roles/helm_install/files/kafka/charts/akhq/Chart.yaml b/ansible/roles/helm_install/files/kafka/charts/akhq/Chart.yaml
new file mode 100644
index 0000000..e5427b9
--- /dev/null
+++ b/ansible/roles/helm_install/files/kafka/charts/akhq/Chart.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+appVersion: 0.20.0
+description: Kafka GUI for Apache Kafka to manage topics, topics data, consumers group,
+  schema registry, connect and more...
+home: https://akhq.io
+icon: https://raw.githubusercontent.com/tchiotludo/akhq/master/client/src/images/logo_black.png
+keywords:
+- kafka
+- confluent
+- gui
+- schema-registry
+- kafka-connect
+maintainers:
+- email: tchiot.ludo@gmail.com
+  name: tchiotludo
+name: akhq
+sources:
+- https://github.com/tchiotludo/akhq
+version: 0.2.7
diff --git a/ansible/roles/helm_install/files/kafka/charts/akhq/LICENSE b/ansible/roles/helm_install/files/kafka/charts/akhq/LICENSE
new file mode 100644
index 0000000..9c8f3ea
--- /dev/null
+++ b/ansible/roles/helm_install/files/kafka/charts/akhq/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file
diff --git a/ansible/roles/helm_install/files/kafka/charts/akhq/README.md b/ansible/roles/helm_install/files/kafka/charts/akhq/README.md
new file mode 100644
index 0000000..80b2c3b
--- /dev/null
+++ b/ansible/roles/helm_install/files/kafka/charts/akhq/README.md
@@ -0,0 +1,124 @@
+# AKHQ (previously known as KafkaHQ)
+
+![Last Version](https://img.shields.io/github/tag-pre/tchiotludo/akhq.svg)
+![License](https://img.shields.io/github/license/tchiotludo/akhq)
+![Docker Pull](https://img.shields.io/docker/pulls/tchiotludo/akhq.svg)
+![Github Downloads](https://img.shields.io/github/downloads/tchiotludo/akhq/total)
+![Github Stars](https://img.shields.io/github/stars/tchiotludo/akhq.svg)
+![Main](https://github.com/tchiotludo/akhq/workflows/Main/badge.svg)
+[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/akhq)](https://artifacthub.io/packages/search?repo=akhq)
+
+> Kafka GUI for [Apache Kafka](http://kafka.apache.org/) to manage topics, topics data, consumers group, schema registry, connect and more...
+

+*AKHQ for Kafka logo*
+
+*AKHQ for Kafka preview*
+
+## Documentation
+* The official AKHQ documentation can be found under: [akhq.io](https://akhq.io/docs)
+
+
+## Sponsors
+
+**[Upstash: Serverless Kafka](https://upstash.com/?utm_source=AKHQ)**
+
+* True Serverless Kafka with per-request-pricing
+* Managed Apache Kafka, works with all Kafka clients
+* Built-in REST API designed for serverless and edge functions
+
+[Start for free in 30 seconds!](https://upstash.com/?utm_source=AKHQ)
+
+**[Redpanda](https://redpanda.com/?utm_source=AKHQ)**
+
+* Redpanda is a streaming data platform for developers.
+* Kafka API compatible.
+* 10x faster. No ZooKeeper. No JVM!
+
+[redpanda.com](https://redpanda.com/?utm_source=AKHQ)
+
+## From AKHQ project creator
+
+**Kestra: Open source data orchestration and scheduling platform**
+
+Kestra is an infinitely scalable orchestration and scheduling platform, creating, running, scheduling, and monitoring millions of complex pipelines.
+
+[Discover the project!](https://github.com/kestra-io/kestra?utm_source=AKHQ)
+ + + +## Who's using AKHQ +* [Adeo](https://www.adeo.com/) +* [Avlino](https://avlino.com/) +* [Auchan Retail](https://www.auchan-retail.com/) +* [BARMER](https://www.barmer.de/) +* [Bell](https://www.bell.ca) +* [Best buy](https://www.bestbuy.com) +* [BMW Group](https://www.bmwgroup.com) +* [Boulanger](https://www.boulanger.com/) +* [BPCE-IT](https://www.bpce-it.fr/) +* [Decathlon](https://www.decathlon.fr/) +* [Depop](https://www.depop.com) +* [Galeries Lafayette](https://www.galerieslafayette.com/) +* [GetYourGuide](https://www.getyourguide.com) +* [Kitopi](https://kitopi.com) +* [Klarna](https://www.klarna.com) +* [La Redoute](https://laredoute.io/) +* [Leroy Merlin](https://www.leroymerlin.fr/) +* [NEXT Technologies](https://www.nextapp.co/) +* [Nuxeo](https://www.nuxeo.com/) +* [Pipedrive](https://www.pipedrive.com) +* [TVG](https://www.tvg.com) +* [Vodeno](https://www.vodeno.com/) + + + +## Credits + +Many thanks to: + +* [JetBrains](https://www.jetbrains.com/?from=AKHQ) for their free OpenSource license. +* Apache, Apache Kafka, Kafka, and associated open source project names are trademarks of the Apache Software Foundation. AKHQ is not affiliated with, endorsed by, or otherwise associated with the Apache Software. + +[![Jetbrains](https://user-images.githubusercontent.com/2064609/55432917-6df7fc00-5594-11e9-90c4-5133fbb6d4da.png)](https://www.jetbrains.com/?from=AKHQ) + + +## License +Apache 2.0 © [tchiotludo](https://github.com/tchiotludo) diff --git a/ansible/roles/helm_install/files/kafka/charts/akhq/templates/NOTES.txt b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/NOTES.txt new file mode 100644 index 0000000..ecaa0e6 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/NOTES.txt @@ -0,0 +1,21 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range $.Values.ingress.paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "akhq.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ include "akhq.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "akhq.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "akhq.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:{{ .Values.service.port }} +{{- end }} diff --git a/ansible/roles/helm_install/files/kafka/charts/akhq/templates/_helpers.tpl b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/_helpers.tpl new file mode 100644 index 0000000..f7bce89 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/_helpers.tpl @@ -0,0 +1,56 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "akhq.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "akhq.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "akhq.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "akhq.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "akhq.fullname" .) .Values.serviceAccountName }} +{{- else }} +{{- default "default" .Values.serviceAccountName }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for Ingress +*/}} +{{- define "akhq.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.Version -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/kafka/charts/akhq/templates/configmap.yaml b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/configmap.yaml new file mode 100644 index 0000000..6b8a4f3 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.configuration }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "akhq.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "akhq.name" . }} + helm.sh/chart: {{ include "akhq.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +data: + application.yml: | +{{ toYaml .Values.configuration | indent 4}} +{{- end }} diff --git a/ansible/roles/helm_install/files/kafka/charts/akhq/templates/deployment.yaml b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/deployment.yaml new file mode 100644 index 0000000..b6acb36 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/deployment.yaml @@ -0,0 +1,129 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "akhq.fullname" . }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "akhq.name" . }} + helm.sh/chart: {{ include "akhq.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicaCount | default 1 }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "akhq.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + annotations: + {{- if .Values.configuration }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if and (not .Values.existingSecret) (.Values.secrets) }} + checksum/secrets: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- end }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "akhq.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext }} + securityContext: + {{ toYaml .Values.securityContext | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "akhq.serviceAccountName" . }} + {{- if .Values.initContainers }} + initContainers: + {{- range $key, $value := .Values.initContainers }} + - name: {{ $key }} +{{ toYaml $value | indent 10 }} + {{- end }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy | default "Always" }} + {{- if .Values.containerSecurityContext }} + securityContext: + {{ toYaml .Values.containerSecurityContext | nindent 12 }} + {{- end }} + env: + {{- if .Values.extraEnv }}{{ toYaml .Values.extraEnv | trim | nindent 12 }}{{ end }} + {{- if or (.Values.existingSecrets) (.Values.secrets) }} + - name: MICRONAUT_ENVIRONMENTS + value: secrets + - name: MICRONAUT_CONFIG_FILES + value: /app/application.yml,/app/application-secrets.yml + {{- end }} + volumeMounts: + {{- if .Values.extraVolumeMounts }}{{ toYaml .Values.extraVolumeMounts | trim | nindent 12 }}{{ end }} + {{- if .Values.configuration }} + - name: config + mountPath: /app/application.yml + subPath: application.yml + {{- end }} + {{- if or (.Values.existingSecrets) (.Values.secrets) }} + - name: secrets + mountPath: /app/application-secrets.yml + subPath: application-secrets.yml + {{- end }} + ports: + - name: http + containerPort: 8080 + protocol: TCP + - name: management + containerPort: 28081 + protocol: TCP + livenessProbe: + tcpSocket: + port: management + readinessProbe: + httpGet: + path: {{ .Values.readinessProbe.prefix | default "" }}/health + port: management + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.extraVolumes }}{{ toYaml .Values.extraVolumes | trim | nindent 6 }}{{ end }} + {{- if .Values.configuration }} + - name: config + configMap: + name: {{ template "akhq.fullname" . 
}} + {{- end }} + {{- if or (.Values.existingSecrets) (.Values.secrets) }} + - name: secrets + secret: + {{- if .Values.existingSecrets }} + secretName: {{ .Values.existingSecrets }} + {{- else }} + secretName: {{ template "akhq.fullname" . }}-secrets + {{- end }} + {{- end }} diff --git a/ansible/roles/helm_install/files/kafka/charts/akhq/templates/ingress.yaml b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/ingress.yaml new file mode 100644 index 0000000..47ff6c6 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/ingress.yaml @@ -0,0 +1,53 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "akhq.fullname" . -}} +{{- $ingressPaths := .Values.ingress.paths -}} +apiVersion: {{ include "akhq.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app.kubernetes.io/name: {{ include "akhq.name" . }} + helm.sh/chart: {{ include "akhq.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- with .Values.ingress.annotations }} + annotations: + {{- tpl (toYaml .) $ | nindent 4 }} + {{- end }} +spec: +{{- if and .Values.ingress.ingressClassName (eq (include "akhq.ingress.apiVersion" $) "networking.k8s.io/v1") }} + ingressClassName: {{ .Values.ingress.ingressClassName }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ . | quote }} + http: + paths: + {{- range $ingressPaths }} + - path: {{ . }} + {{- if eq (include "akhq.ingress.apiVersion" $) "networking.k8s.io/v1" }} + pathType: "ImplementationSpecific" + {{- end }} + backend: + {{- if eq (include "akhq.ingress.apiVersion" $) "networking.k8s.io/v1" }} + service: + name: {{ $fullName }} + port: + name: http + {{ else }} + serviceName: {{ $fullName }} + servicePort: http + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/kafka/charts/akhq/templates/secret.yaml b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/secret.yaml new file mode 100644 index 0000000..77ac50c --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/secret.yaml @@ -0,0 +1,19 @@ +{{- if and ( not .Values.existingSecrets) (.Values.secrets) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "akhq.fullname" . }}-secrets + labels: + app.kubernetes.io/name: {{ include "akhq.name" . }} + helm.sh/chart: {{ include "akhq.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +type: Opaque +data: + application-secrets.yml: {{ toYaml .Values.secrets | b64enc | quote }} + {{- if .Values.kafkaSecrets }} + {{- range $key, $value := .Values.kafkaSecrets }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} diff --git a/ansible/roles/helm_install/files/kafka/charts/akhq/templates/service.yaml b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/service.yaml new file mode 100644 index 0000000..2877ba8 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/service.yaml @@ -0,0 +1,31 @@ +{{- if .Values.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "akhq.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "akhq.name" . }} + helm.sh/chart: {{ include "akhq.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- range $key, $value := .Values.service.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + annotations: + {{- range $key, $value := .Values.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + {{- if and (eq "NodePort" .Values.service.type) .Values.service.httpNodePort }} + nodePort: {{ .Values.service.httpNodePort }} + {{- end }} + selector: + app.kubernetes.io/name: {{ include "akhq.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/ansible/roles/helm_install/files/kafka/charts/akhq/templates/serviceaccount.yaml b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/serviceaccount.yaml new file mode 100644 index 0000000..9acd47f --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/akhq/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: {{ include "akhq.name" . }} + helm.sh/chart: {{ include "akhq.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ include "akhq.serviceAccountName" . }} +{{- end }} diff --git a/ansible/roles/helm_install/files/kafka/charts/akhq/values.yaml b/ansible/roles/helm_install/files/kafka/charts/akhq/values.yaml new file mode 100644 index 0000000..ee23561 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/akhq/values.yaml @@ -0,0 +1,145 @@ +# imagePullSecrets: +# - name: my-repository-secret +image: + repository: tchiotludo/akhq + tag: "" # uses Chart.AppVersion by default + +# custom annotations (example: for prometheus) +annotations: {} + #prometheus.io/scrape: 'true' + #prometheus.io/port: '8080' + #prometheus.io/path: '/prometheus' + +podAnnotations: {} + +# custom labels +labels: {} + # custom.label: 'true' + +podLabels: {} + +## You can put directly your configuration here... or add java opts or any other env vars +extraEnv: [] +# - name: AKHQ_CONFIGURATION +# value: | +# akhq: +# secrets: +# docker-kafka-server: +# properties: +# bootstrap.servers: "kafka:9092" +# - name: JAVA_OPTS +# value: "-Djavax.net.ssl.trustStore=/usr/local/openjdk-11/lib/security/cacerts -Djavax.net.ssl.trustStorePassword=password" +# - name: CLASSPATH +# value: "/any/additional/jars/desired.jar:/go/here.jar" + +## Or you can also use configmap for the configuration... +configuration: + akhq: + server: + access-log: + enabled: false + name: org.akhq.log.access + +##... 
and secret for connection information +existingSecrets: "" +# name of the existingSecret +secrets: + akhq: + connections: + my-cluster-plain-text: + properties: + bootstrap.servers: "kafka:9092" +# schema-registry: +# url: "http://schema-registry:8085" +# type: "confluent" +# basic-auth-username: basic-auth-user +# basic-auth-password: basic-auth-pass +# connect: +# - name: "my-connect" +# url: "http://connect:8083" +# basic-auth-username: basic-auth-user +# basic-auth-password: basic-auth-pass + +kafkaSecrets: [] +#Provide extra base64 encoded kubernetes secrets (keystore/truststore) + +# Any extra volumes to define for the pod (like keystore/truststore) +extraVolumes: [] + +# Any extra volume mounts to define for the akhq container +extraVolumeMounts: [] + +# Specify ServiceAccount for pod +serviceAccountName: null +serviceAccount: + create: false + #annotations: + # eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here + +# Add your own init container or uncomment and modify the example. +initContainers: {} +# create-keystore: +# image: "openjdk:11-slim" +# command: ['sh', '-c', 'keytool'] +# volumeMounts: +# - mountPath: /tmp +# name: certs + +# Configure the Pod Security Context +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +securityContext: {} + # runAsNonRoot: true + # runAsUser: 1000 + +# Configure the Container Security Context +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +containerSecurityContext: {} + # allowPrivilegeEscalation: false + # privileged: false + # capabilities: + # drop: + # - ALL + # runAsNonRoot: true + # runAsUser: 1001 + # readOnlyRootFilesystem: true + +service: + enabled: true + type: NodePort + port: 80 + nodePort: 32551 + labels: {} + annotations: + # cloud.google.com/load-balancer-type: "Internal" + +ingress: + enabled: false + ingressClassName: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + paths: + - / + hosts: + - akhq.demo.com + tls: [] + # - secretName: akhq-tls + # hosts: + # - akhq.demo.com + +readinessProbe: + prefix: "" # set same as `micronaut.server.context-path` + +resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/.helmignore b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/.helmignore new file mode 100644 index 0000000..7a93969 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/.helmignore @@ -0,0 +1,25 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +example/ +README.md diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/Chart.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/Chart.yaml new file mode 100644 index 0000000..5656f52 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +appVersion: v0.4.0 +description: A Helm chart for kafka-UI +icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png +name: kafka-ui +type: application +version: v0.4.1 diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/index.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/index.yaml new file mode 100644 index 0000000..8728071 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/index.yaml @@ -0,0 +1,3 @@ +apiVersion: v1 +entries: {} +generated: "2021-11-11T12:26:08.479581+03:00" diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/NOTES.txt b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/NOTES.txt new file mode 100644 index 0000000..94e8d39 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/NOTES.txt @@ -0,0 +1,21 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "kafka-ui.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "kafka-ui.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "kafka-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kafka-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080 +{{- end }} diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/_helpers.tpl b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/_helpers.tpl new file mode 100644 index 0000000..510452d --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/_helpers.tpl @@ -0,0 +1,79 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "kafka-ui.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kafka-ui.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kafka-ui.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "kafka-ui.labels" -}} +helm.sh/chart: {{ include "kafka-ui.chart" . }} +{{ include "kafka-ui.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kafka-ui.selectorLabels" -}} +app.kubernetes.io/name: {{ include "kafka-ui.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kafka-ui.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "kafka-ui.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + + +{{/* +This allows us to check if the registry of the image is specified or not. +*/}} +{{- define "kafka-ui.imageName" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repository := .Values.image.repository -}} +{{- $tag := .Values.image.tag | default .Chart.AppVersion -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repository $tag -}} +{{- else }} +{{- printf "%s:%s" $repository $tag -}} +{{- end }} +{{- end -}} + diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/configmap.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/configmap.yaml new file mode 100644 index 0000000..22d2a69 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/configmap.yaml @@ -0,0 +1,10 @@ +{{- if .Values.envs.config -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kafka-ui.fullname" . }} + labels: + {{- include "kafka-ui.labels" . | nindent 4 }} +data: + {{- toYaml .Values.envs.config | nindent 2 }} +{{- end -}} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/configmap_fromValues.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/configmap_fromValues.yaml new file mode 100644 index 0000000..ae8b769 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/configmap_fromValues.yaml @@ -0,0 +1,11 @@ +{{- if .Values.yamlApplicationConfig -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kafka-ui.fullname" . }}-fromvalues + labels: + {{- include "kafka-ui.labels" . 
| nindent 4 }} +data: + config.yml: |- + {{- toYaml .Values.yamlApplicationConfig | nindent 4}} +{{ end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/deployment.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/deployment.yaml new file mode 100644 index 0000000..1f7f6c9 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/deployment.yaml @@ -0,0 +1,139 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "kafka-ui.fullname" . }} + labels: + {{- include "kafka-ui.labels" . | nindent 4 }} +spec: +{{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} +{{- end }} + selector: + matchLabels: + {{- include "kafka-ui.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + labels: + {{- include "kafka-ui.selectorLabels" . | nindent 8 }} + {{- if .Values.podLabels }} + {{- toYaml .Values.podLabels | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.initContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "kafka-ui.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: {{ include "kafka-ui.imageName" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if or .Values.env .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}} + env: + {{- with .Values.env }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if or .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}} + - name: SPRING_CONFIG_LOCATION + {{- if .Values.yamlApplicationConfig }} + value: /kafka-ui/config.yml + {{- else if .Values.yamlApplicationConfigConfigMap }} + value: /kafka-ui/{{ .Values.yamlApplicationConfigConfigMap.keyName | default "config.yml" }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + {{- if .Values.existingConfigMap }} + - configMapRef: + name: {{ .Values.existingConfigMap }} + {{- end }} + {{- if .Values.envs.config }} + - configMapRef: + name: {{ include "kafka-ui.fullname" . }} + {{- end }} + {{- if .Values.existingSecret }} + - secretRef: + name: {{ .Values.existingSecret }} + {{- end }} + {{- if .Values.envs.secret}} + - secretRef: + name: {{ include "kafka-ui.fullname" . 
}} + {{- end}} + ports: + - name: http + containerPort: 8080 + protocol: TCP + livenessProbe: + httpGet: + {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }} + path: {{ get $contextPath "path" }} + port: http + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 10 + readinessProbe: + httpGet: + {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }} + path: {{ get $contextPath "path" }} + port: http + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 10 + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- if or .Values.yamlApplicationConfig .Values.volumeMounts .Values.yamlApplicationConfigConfigMap}} + volumeMounts: + {{- with .Values.volumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.yamlApplicationConfig }} + - name: kafka-ui-yaml-conf + mountPath: /kafka-ui/ + {{- end }} + {{- if .Values.yamlApplicationConfigConfigMap}} + - name: kafka-ui-yaml-conf-configmap + mountPath: /kafka-ui/ + {{- end }} + {{- end }} + {{- if or .Values.yamlApplicationConfig .Values.volumes .Values.yamlApplicationConfigConfigMap}} + volumes: + {{- with .Values.volumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.yamlApplicationConfig }} + - name: kafka-ui-yaml-conf + configMap: + name: {{ include "kafka-ui.fullname" . }}-fromvalues + {{- end }} + {{- if .Values.yamlApplicationConfigConfigMap}} + - name: kafka-ui-yaml-conf-configmap + configMap: + name: {{ .Values.yamlApplicationConfigConfigMap.name }} + {{- end }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/hpa.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/hpa.yaml new file mode 100644 index 0000000..1509ef3 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "kafka-ui.fullname" . }} + labels: + {{- include "kafka-ui.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "kafka-ui.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/ingress.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/ingress.yaml new file mode 100644 index 0000000..7659867 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/ingress.yaml @@ -0,0 +1,87 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "kafka-ui.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (trimPrefix "v" .Capabilities.KubeVersion.Version | semverCompare ">= 1.19" ) -}} +apiVersion: networking.k8s.io/v1 +{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "kafka-ui.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.tls.enabled }} + tls: + - hosts: + - {{ tpl .Values.ingress.host . }} + secretName: {{ .Values.ingress.tls.secretName }} + {{- end }} + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end }} + rules: + - http: + paths: +{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (trimPrefix "v" .Capabilities.KubeVersion.Version | semverCompare ">= 1.19" ) -}} + {{- range .Values.ingress.precedingPaths }} + - path: {{ .path }} + pathType: Prefix + backend: + service: + name: {{ .serviceName }} + port: + number: {{ .servicePort }} + {{- end }} + - backend: + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + pathType: Prefix +{{- if .Values.ingress.path }} + path: {{ .Values.ingress.path }} +{{- end }} + {{- range .Values.ingress.succeedingPaths }} + - path: {{ .path }} + pathType: Prefix + backend: + service: + name: {{ .serviceName }} + port: + number: {{ .servicePort }} + {{- end }} +{{- if tpl .Values.ingress.host . }} + host: {{tpl .Values.ingress.host . }} +{{- end }} +{{- else -}} + {{- range .Values.ingress.precedingPaths }} + - path: {{ .path }} + backend: + serviceName: {{ .serviceName }} + servicePort: {{ .servicePort }} + {{- end }} + - backend: + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} +{{- if .Values.ingress.path }} + path: {{ .Values.ingress.path }} +{{- end }} + {{- range .Values.ingress.succeedingPaths }} + - path: {{ .path }} + backend: + serviceName: {{ .serviceName }} + servicePort: {{ .servicePort }} + {{- end }} +{{- if tpl .Values.ingress.host . }} + host: {{ tpl .Values.ingress.host . 
}} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/networkpolicy-egress.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/networkpolicy-egress.yaml new file mode 100644 index 0000000..4f58280 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/networkpolicy-egress.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ printf "%s-egress" (include "kafka-ui.fullname" .) }} + labels: + {{- include "kafka-ui.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "kafka-ui.selectorLabels" . | nindent 6 }} + policyTypes: + - Egress + egress: + {{- if .Values.networkPolicy.egressRules.customRules }} + {{- toYaml .Values.networkPolicy.egressRules.customRules | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/networkpolicy-ingress.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/networkpolicy-ingress.yaml new file mode 100644 index 0000000..7498867 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/networkpolicy-ingress.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.ingressRules.customRules }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ printf "%s-ingress" (include "kafka-ui.fullname" .) }} + labels: + {{- include "kafka-ui.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "kafka-ui.selectorLabels" . | nindent 6 }} + policyTypes: + - Ingress + ingress: + {{- if .Values.networkPolicy.ingressRules.customRules }} + {{- toYaml .Values.networkPolicy.ingressRules.customRules | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/secret.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/secret.yaml new file mode 100644 index 0000000..a2ebf0f --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "kafka-ui.fullname" . }} + labels: + {{- include "kafka-ui.labels" . | nindent 4 }} +type: Opaque +data: + {{- toYaml .Values.envs.secret | nindent 2 }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/service.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/service.yaml new file mode 100644 index 0000000..5801135 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "kafka-ui.fullname" . }} + labels: + {{- include "kafka-ui.labels" . | nindent 4 }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + {{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + selector: + {{- include "kafka-ui.selectorLabels" . 
| nindent 4 }} diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/serviceaccount.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/serviceaccount.yaml new file mode 100644 index 0000000..b89551c --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "kafka-ui.serviceAccountName" . }} + labels: + {{- include "kafka-ui.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka-ui/values.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/values.yaml new file mode 100644 index 0000000..d453453 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka-ui/values.yaml @@ -0,0 +1,151 @@ +replicaCount: 1 + +image: + registry: docker.io + repository: provectuslabs/kafka-ui + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +existingConfigMap: "" +yamlApplicationConfig: + {} + # kafka: + # clusters: + # - name: yaml + # bootstrapServers: kafka-service:9092 + # spring: + # security: + # oauth2: + # auth: + # type: disabled + # management: + # health: + # ldap: + # enabled: false +yamlApplicationConfigConfigMap: + {} + # keyName: config.yml + # name: configMapName +existingSecret: "" +envs: + secret: {} + config: + KAFKA_CLUSTERS_0_NAME: local + KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092 + +networkPolicy: + enabled: false + egressRules: + ## Additional custom egress rules + ## e.g: + ## customRules: + ## - to: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: [] + ingressRules: + ## Additional custom ingress rules + ## e.g: + ## customRules: + ## - from: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: [] + +podAnnotations: {} +podLabels: {} + +podSecurityContext: + {} + # fsGroup: 2000 + +securityContext: + {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: NodePort + port: 80 + # if you want to force a specific nodePort. 
Must be used with service.type=NodePort
+  # nodePort:
+
+# Ingress configuration
+ingress:
+  # Enable ingress resource
+  enabled: false
+
+  # Annotations for the Ingress
+  annotations: {}
+
+  # ingressClassName for the Ingress
+  ingressClassName: ""
+
+  # The path for the Ingress
+  path: ""
+
+  # The hostname for the Ingress
+  host: ""
+
+  # configs for Ingress TLS
+  tls:
+    # Enable TLS termination for the Ingress
+    enabled: false
+    # the name of a pre-created Secret containing a TLS private key and certificate
+    secretName: ""
+
+  # HTTP paths to add to the Ingress before the default path
+  precedingPaths: []
+
+  # HTTP paths to add to the Ingress after the default path
+  succeedingPaths: []
+
+resources:
+  {}
+  # limits:
+  #   cpu: 200m
+  #   memory: 512Mi
+  # requests:
+  #   cpu: 200m
+  #   memory: 256Mi
+
+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 100
+  targetCPUUtilizationPercentage: 80
+  # targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+env: {}
+
+initContainers: {}
+
+volumeMounts: {}
+
+volumes: {}
diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka/.helmignore b/ansible/roles/helm_install/files/kafka/charts/kafka/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/ansible/roles/helm_install/files/kafka/charts/kafka/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka/1.broker-config.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka/1.broker-config.yaml
new file mode 100644
index 0000000..0b5a0c4
--- /dev/null
+++ b/ansible/roles/helm_install/files/kafka/charts/kafka/1.broker-config.yaml
@@ -0,0 +1,171 @@
+kind: ConfigMap
+metadata:
+  name: broker-config
+apiVersion: v1
+data:
+  init.sh: |-
+    #!/bin/bash
+    set -e
+    set -x
+    cp /etc/kafka-configmap/log4j.properties /etc/kafka/
+    KAFKA_BROKER_ID=${HOSTNAME##*-}
+    SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/")
+    LABELS="kafka-broker-id=$KAFKA_BROKER_ID"
+    ANNOTATIONS=""
+    hash kubectl 2>/dev/null || {
+      SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/")
+    } && {
+      ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}')
+      if [ $? -ne 0 ]; then
+        SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/")
+      elif [ "x$ZONE" == "x" ]; then
+        SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/")
+      else
+        SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/")
+        LABELS="$LABELS kafka-broker-rack=$ZONE"
+      fi
+      OUTSIDE_HOST=$(kubectl get node "$NODE_NAME" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
+      if [ $? -ne 0 ]; then
+        echo "Outside (i.e. cluster-external access) host lookup command failed"
+      else
+        OUTSIDE_PORT=3240${KAFKA_BROKER_ID}
+        GLOBAL_PORT=3250${KAFKA_BROKER_ID}
+        SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT},GLOBAL://${OUTSIDE_HOST}:${GLOBAL_PORT}|")
+        ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT"
+      fi
+      if [ ! -z "$LABELS" ]; then
+        kubectl -n $POD_NAMESPACE label pod $POD_NAME $LABELS || echo "Failed to label $POD_NAMESPACE.$POD_NAME - RBAC issue?"
+      fi
+      if [ ! -z "$ANNOTATIONS" ]; then
+        kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?"
+      fi
+    }
+    printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp
+    [ $? -eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties
+  server.properties: |-
+    # Do not modify the #init# lines below (init.sh depends on them)
+    #init#broker.id=#init#
+    #init#broker.rack=#init#
+    #init#advertised.listeners=OUTSIDE://#init#,PLAINTEXT://:9092
+    ########################################################################
+    ##### Broker, Zookeeper
+    log.dirs=/var/lib/kafka/data/topics
+    zookeeper.connect=zookeeper:2181
+    zookeeper.session.timeout.ms=18000
+    controller.quorum.election.backoff.max.ms=1000
+    controller.quorum.election.timeout.ms=1000
+    ########################################################################
+    ##### Listener
+    listeners=OUTSIDE://:9094,PLAINTEXT://:9092,GLOBAL://:9095
+    listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,OUTSIDE:PLAINTEXT,GLOBAL:PLAINTEXT
+    inter.broker.listener.name=PLAINTEXT
+    ########################################################################
+    ###### Thread
+    background.threads=10
+    num.recovery.threads.per.data.dir=1
+    num.io.threads=10
+    num.network.threads=4
+    num.replica.fetchers=4
+    log.cleaner.threads=1
+    ########################################################################
+    ###### Topic, Partition
+    replica.fetch.min.bytes=1
+    replica.lag.time.max.ms=30000
+    auto.create.topics.enable=true
+    default.replication.factor=1
+    min.insync.replicas=1
+    delete.topic.enable=true
+    num.partitions=12
+    auto.leader.rebalance.enable=true
+    leader.imbalance.check.interval.seconds=120
+    ########################################################################
+    ##### Log, Message
+    log.cleaner.enable=true
+    log.cleanup.policy=delete
+    log.segment.delete.delay.ms=60000
+    log.flush.interval.messages=1000000
+    log.flush.interval.ms=60000
+    log.flush.scheduler.interval.ms=2000
+    log.flush.offset.checkpoint.interval.ms=60000
+    ########################################################################
+    ##### Offset, Commit
+    offsets.retention.minutes=1440
+    offsets.topic.replication.factor=1
+    ########################################################################
+    ##### MessageSize, Socket
+    message.max.bytes=1048576
+    max.message.bytes=1048576
+    replica.fetch.max.bytes=1048576
+    socket.receive.buffer.bytes=1048576
+    socket.send.buffer.bytes=1048576
+    replica.socket.receive.buffer.bytes=65536
+    socket.request.max.bytes=104857600
+    ########################################################################
+    ##### Retention
+    log.retention.minutes=5
+    log.retention.bytes=1073741824
+    log.retention.check.interval.ms=60000
+    log.segment.bytes=536870912
+  log4j.properties: |-
+    # Unspecified loggers and loggers with additivity=true output to
server.log and stdout + # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log + log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log + log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log + log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log + log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log + log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log + log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + # Change the two lines below to adjust ZK client logging + log4j.logger.org.I0Itec.zkclient.ZkClient=INFO + log4j.logger.org.apache.zookeeper=INFO + # Change the two lines below to adjust the general broker logging level (output to server.log and stdout) + log4j.logger.kafka=INFO + log4j.logger.org.apache.kafka=INFO + # Change to DEBUG or TRACE to enable request logging + log4j.logger.kafka.request.logger=WARN, requestAppender + log4j.additivity.kafka.request.logger=false + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output + # related to the handling of requests + #log4j.logger.kafka.network.Processor=TRACE, requestAppender + #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender + #log4j.additivity.kafka.server.KafkaApis=false + log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender + log4j.additivity.kafka.network.RequestChannel$=false + log4j.logger.kafka.controller=TRACE, controllerAppender + 
log4j.additivity.kafka.controller=false + log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender + log4j.additivity.kafka.log.LogCleaner=false + log4j.logger.state.change.logger=TRACE, stateChangeAppender + log4j.additivity.state.change.logger=false + # Change to DEBUG to enable audit log for the authorizer + log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender + log4j.additivity.kafka.authorizer.logger=false + diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka/Chart.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka/Chart.yaml new file mode 100644 index 0000000..9565567 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka +version: 0.1.0 diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka/templates/2.dns.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka/templates/2.dns.yaml new file mode 100644 index 0000000..73c9ee1 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka/templates/2.dns.yaml @@ -0,0 +1,14 @@ +# A headless service to create DNS records +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-headless + namespace: {{ .Release.Namespace }} +spec: + ports: + - port: 9092 + clusterIP: None + selector: + app: kafka +--- diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka/templates/3.bootstrap-service.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka/templates/3.bootstrap-service.yaml new file mode 100644 index 0000000..d96c323 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka/templates/3.bootstrap-service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: +# name: bootstrap + name: kafka + namespace: {{ .Release.Namespace }} +spec: + ports: + - port: 9092 + selector: + app: kafka diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka/templates/5.kafka.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka/templates/5.kafka.yaml new file mode 100644 index 0000000..3dc02ce --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka/templates/5.kafka.yaml @@ -0,0 +1,124 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app: kafka + serviceName: "kafka-headless" + replicas: 3 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: kafka + spec: + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + + terminationGracePeriodSeconds: 30 + initContainers: + - name: init-config + image: {{ .Values.initContainers.image.repository }}:{{ .Values.initContainers.image.tag }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: extensions + mountPath: /opt/kafka/libs/extensions + containers: + - name: broker + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + env: + - name: CLASSPATH + value: /opt/kafka/libs/extensions/* + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + - name: JMX_PORT + value: "5555" + - name: KAFKA_OPTS + value: -javaagent:/opt/kafka/jmx_prometheus_javaagent-0.15.0.jar=9010:/opt/kafka/config.yaml + ports: + - name: inside + containerPort: 9092 + - name: outside + containerPort: 9094 + - name: global + containerPort: 9095 + - name: jmx + containerPort: 9010 + command: + - ./bin/kafka-server-start.sh + - /etc/kafka/server.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] + resources: + requests: + cpu: 500m + memory: 6000Mi + limits: + # This limit was intentionally set low as a reminder that + # the entire Yolean/kubernetes-kafka is meant to be tweaked + # before you run production workloads + cpu: 1000m + memory: 10000Mi + readinessProbe: + tcpSocket: + port: 9092 + timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/kafka/data + - name: extensions + mountPath: /opt/kafka/libs/extensions + volumes: + - name: configmap + configMap: + name: broker-config + - name: config + emptyDir: {} + - name: extensions + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.global.StorageClassName }} + resources: + requests: + storage: 50Gi + diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka/templates/6.outside.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka/templates/6.outside.yaml new file mode 100644 index 0000000..ddaac45 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka/templates/6.outside.yaml @@ -0,0 +1,127 @@ +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-0 + namespace: {{ .Release.Namespace }} +spec: + selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9094 + port: 32400 + nodePort: {{ .Values.service.kafka_outside_0 }} + type: NodePort +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-1 + namespace: {{ .Release.Namespace }} +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 9094 + port: 32401 + nodePort: {{ .Values.service.kafka_outside_1 }} + type: NodePort +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-2 + namespace: {{ .Release.Namespace }} +spec: + selector: + app: kafka + kafka-broker-id: "2" + ports: + - protocol: TCP + targetPort: 9094 + port: 32402 + nodePort: {{ .Values.service.kafka_outside_2 }} + type: NodePort +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-0 + namespace: {{ .Release.Namespace }} +spec: + 
selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9095 + port: 32500 + nodePort: {{ .Values.service.kafka_global_0 }} + type: NodePort +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-1 + namespace: {{ .Release.Namespace }} +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 9095 + port: 32501 + nodePort: {{ .Values.service.kafka_global_1 }} + type: NodePort +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-2 + namespace: {{ .Release.Namespace }} +spec: + selector: + app: kafka + kafka-broker-id: "2" + ports: + - protocol: TCP + targetPort: 9095 + port: 32502 + nodePort: {{ .Values.service.kafka_global_2 }} + type: NodePort +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker + namespace: {{ .Release.Namespace }} +spec: + type: NodePort + ports: + - port: 9094 + name: kafka + protocol: TCP + targetPort: 9094 + nodePort: {{ .Values.service.kafka_broker }} + selector: + app: kafka +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker-global + namespace: {{ .Release.Namespace }} +spec: + type: NodePort + ports: + - port: 9095 + name: kafka + protocol: TCP + targetPort: 9095 + nodePort: {{ .Values.service.kafka_broker_global }} + selector: + app: kafka diff --git a/ansible/roles/helm_install/files/kafka/charts/kafka/values.yaml b/ansible/roles/helm_install/files/kafka/charts/kafka/values.yaml new file mode 100644 index 0000000..8e74b79 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/kafka/values.yaml @@ -0,0 +1,73 @@ +# Default values for kafka. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +service: + kafka_outside_0: "32400" + kafka_outside_1: "32401" + kafka_global_0: "32500" + kafka_global_1: "32501" + kafka_broker: "9094" + kafka_broker_global: "9095" diff --git a/ansible/roles/helm_install/files/kafka/charts/zookeeper/.helmignore b/ansible/roles/helm_install/files/kafka/charts/zookeeper/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/zookeeper/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/roles/helm_install/files/kafka/charts/zookeeper/Chart.yaml b/ansible/roles/helm_install/files/kafka/charts/zookeeper/Chart.yaml new file mode 100644 index 0000000..c9a2bfb --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/zookeeper/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: zookeeper +version: 0.1.0 diff --git a/ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/0.config.yaml b/ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/0.config.yaml new file mode 100644 index 0000000..7d26875 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/0.config.yaml @@ -0,0 +1,35 @@ +kind: ConfigMap +metadata: + name: zookeeper-config + namespace: {{ .Release.Namespace }} +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + [ -d /var/lib/zookeeper/data ] || mkdir /var/lib/zookeeper/data + [ -z "$ID_OFFSET" ] && ID_OFFSET=1 + export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $ID_OFFSET)) + echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid + cp -Lur /etc/kafka-configmap/* /etc/kafka/ + sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" /etc/kafka/zookeeper.properties + zookeeper.properties: |- + tickTime=2000 + dataDir=/var/lib/zookeeper/data + dataLogDir=/var/lib/zookeeper/log + clientPort=2181 + maxClientCnxns=0 + initLimit=5 + syncLimit=2 + server.1=zookeeper-0.zookeeper-headless.{{ .Release.Namespace }}.svc.cluster.local:2888:3888:participant + server.2=zookeeper-1.zookeeper-headless.{{ .Release.Namespace }}.svc.cluster.local:2888:3888:participant + server.3=zookeeper-2.zookeeper-headless.{{ .Release.Namespace }}.svc.cluster.local:2888:3888:participant + log4j.properties: |- + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + # Suppress connection log messages, three lines per livenessProbe execution + log4j.logger.org.apache.zookeeper.server.NIOServerCnxnFactory=WARN + log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN diff --git a/ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/1.service-leader-election.yaml b/ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/1.service-leader-election.yaml new file mode 100644 index 0000000..7ceca4c --- /dev/null +++ 
b/ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/1.service-leader-election.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: zookeeper-headless + namespace: {{ .Release.Namespace }} +spec: + ports: + - port: 2888 + name: peer + - port: 3888 + name: leader-election + clusterIP: None + selector: + app: zookeeper + storage: persistent + diff --git a/ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/2.service-client.yaml b/ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/2.service-client.yaml new file mode 100644 index 0000000..d9137e5 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/2.service-client.yaml @@ -0,0 +1,12 @@ +# the headless service is for PetSet DNS, this one is for clients +apiVersion: v1 +kind: Service +metadata: + name: zookeeper + namespace: {{ .Release.Namespace }} +spec: + ports: + - port: 2181 + name: client + selector: + app: zookeeper diff --git a/ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/4.statefulset.yaml b/ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/4.statefulset.yaml new file mode 100644 index 0000000..48fad18 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/zookeeper/templates/4.statefulset.yaml @@ -0,0 +1,97 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: zookeeper + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app: zookeeper + storage: persistent + serviceName: "zookeeper-headless" + replicas: 3 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: zookeeper + storage: persistent + annotations: + spec: + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + + terminationGracePeriodSeconds: 10 + initContainers: + - name: init-config + image: {{ .Values.initContainers.image.repository }}:{{ .Values.initContainers.image.tag }} + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper + containers: + - name: zookeeper + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + env: + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + command: + - ./bin/zookeeper-server-start.sh + - /etc/kafka/zookeeper.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] + ports: + - containerPort: 2181 + name: client + - containerPort: 2888 + name: peer + - containerPort: 3888 + name: leader-election + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 200m + memory: 1000Mi + readinessProbe: + exec: + command: ['/bin/bash', '-c', 'echo "ruok" | nc -w 2 localhost 2181 | grep imok'] + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper + volumes: + - name: configmap + configMap: + name: zookeeper-config + - name: config + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.global.StorageClassName }} + resources: + requests: + storage: 30Gi diff --git a/ansible/roles/helm_install/files/kafka/charts/zookeeper/values.yaml b/ansible/roles/helm_install/files/kafka/charts/zookeeper/values.yaml new file mode 100644 index 0000000..386a195 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/charts/zookeeper/values.yaml @@ -0,0 +1,68 @@ +# Default values for zookeeper. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
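# A quick standalone illustration of the server-id arithmetic in the zookeeper
# init.sh above (0.config.yaml): the StatefulSet ordinal is taken from the pod
# hostname suffix and shifted by ID_OFFSET so that myid starts at 1. The
# hostname value here is a placeholder for what the pod would actually see.
HOSTNAME=zookeeper-1
[ -z "$ID_OFFSET" ] && ID_OFFSET=1
ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + ID_OFFSET))
echo "$ZOOKEEPER_SERVER_ID"   # -> 2 for zookeeper-1; the real script tees this to data/myid
# init.sh then sed-rewrites its own server.N entry to 0.0.0.0 so the local peer
# binds all interfaces, while the other entries keep their headless-service DNS names.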
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/roles/helm_install/files/kafka/index.yaml b/ansible/roles/helm_install/files/kafka/index.yaml new file mode 100644 index 0000000..62a41a3 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/index.yaml @@ -0,0 +1,3 @@ +apiVersion: v1 +entries: {} +generated: "2019-11-05T09:47:03.285264152+09:00" diff --git a/ansible/roles/helm_install/files/kafka/templates/role.yaml b/ansible/roles/helm_install/files/kafka/templates/role.yaml new file mode 100644 index 0000000..0fe24af --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/templates/role.yaml @@ -0,0 +1,16 @@ +kind: ClusterRoleBinding +{{- if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- end }} +metadata: + name: {{ .Release.Name }}-cluster-admin-clusterrolebinding +subjects: +- kind: ServiceAccount + name: default + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin diff --git a/ansible/roles/helm_install/files/kafka/test b/ansible/roles/helm_install/files/kafka/test new file mode 100644 index 0000000..d1fbcf1 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/test @@ -0,0 +1,637 @@ +--- +# Source: kafkaset/charts/akhq/templates/secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: test-akhq-secrets + labels: + app.kubernetes.io/name: akhq + helm.sh/chart: akhq-0.2.7 + app.kubernetes.io/instance: test + app.kubernetes.io/managed-by: Helm +type: Opaque +data: + application-secrets.yml: "YWtocToKICBjb25uZWN0aW9uczoKICAgIG15LWNsdXN0ZXItcGxhaW4tdGV4dDoKICAgICAgcHJvcGVydGllczoKICAgICAgICBib290c3RyYXAuc2VydmVyczoga2Fma2E6OTA5Mg==" +--- +# Source: kafkaset/charts/akhq/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-akhq + labels: + app.kubernetes.io/name: akhq + helm.sh/chart: akhq-0.2.7 + app.kubernetes.io/instance: test + app.kubernetes.io/managed-by: Helm +data: + application.yml: | + akhq: + server: + access-log: + enabled: false + name: org.akhq.log.access +--- +# Source: kafkaset/charts/zookeeper/templates/0.config.yaml +kind: ConfigMap +metadata: + name: zookeeper-config + namespace: dsk-middle +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + [ -d /var/lib/zookeeper/data ] || mkdir /var/lib/zookeeper/data + [ -z "$ID_OFFSET" ] && ID_OFFSET=1 + export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $ID_OFFSET)) + echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid + cp -Lur /etc/kafka-configmap/* /etc/kafka/ + sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" /etc/kafka/zookeeper.properties + zookeeper.properties: |- + tickTime=2000 + dataDir=/var/lib/zookeeper/data + dataLogDir=/var/lib/zookeeper/log + clientPort=2181 + maxClientCnxns=0 + initLimit=5 + syncLimit=2 + server.1=zookeeper-0.zookeeper-headless.dsk-middle.svc.cluster.local:2888:3888:participant + server.2=zookeeper-1.zookeeper-headless.dsk-middle.svc.cluster.local:2888:3888:participant + server.3=zookeeper-2.zookeeper-headless.dsk-middle.svc.cluster.local:2888:3888:participant + log4j.properties: |- + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + 
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + # Suppress connection log messages, three lines per livenessProbe execution + log4j.logger.org.apache.zookeeper.server.NIOServerCnxnFactory=WARN + log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN +--- +# Source: kafkaset/templates/role.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: test-cluster-admin-clusterrolebinding +subjects: +- kind: ServiceAccount + name: default + namespace: dsk-middle +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +--- +# Source: kafkaset/charts/akhq/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-akhq + labels: + app.kubernetes.io/name: akhq + helm.sh/chart: akhq-0.2.7 + app.kubernetes.io/instance: test + app.kubernetes.io/managed-by: Helm + annotations: +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: akhq + app.kubernetes.io/instance: test +--- +# Source: kafkaset/charts/kafka/templates/2.dns.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-headless + namespace: dsk-middle +spec: + ports: + - port: 9092 + clusterIP: None + selector: + app: kafka +--- +# Source: kafkaset/charts/kafka/templates/3.bootstrap-service.yaml +apiVersion: v1 +kind: Service +metadata: +# name: bootstrap + name: kafka + namespace: dsk-middle +spec: + ports: + - port: 9092 + selector: + app: kafka +--- +# Source: kafkaset/charts/kafka/templates/6.outside.yaml +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-0 + namespace: dsk-middle +spec: + selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9094 + port: 32400 + nodePort: 32400 + type: NodePort +--- +# Source: kafkaset/charts/kafka/templates/6.outside.yaml +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-1 + namespace: dsk-middle +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 9094 + port: 32401 + nodePort: 32401 + type: NodePort +--- +# Source: kafkaset/charts/kafka/templates/6.outside.yaml +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-2 + namespace: dsk-middle +spec: + selector: + app: kafka + kafka-broker-id: "2" + ports: + - protocol: TCP + targetPort: 9094 + port: 32402 + nodePort: 32402 + type: NodePort +--- +# Source: kafkaset/charts/kafka/templates/6.outside.yaml +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-0 + namespace: dsk-middle +spec: + selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9095 + port: 32500 + nodePort: 32500 + type: NodePort +--- +# Source: kafkaset/charts/kafka/templates/6.outside.yaml +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-1 + namespace: dsk-middle +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 9095 + port: 32501 + nodePort: 32501 + type: NodePort +--- +# Source: kafkaset/charts/kafka/templates/6.outside.yaml +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-2 + namespace: dsk-middle +spec: + selector: + app: kafka + kafka-broker-id: "2" + ports: + - protocol: TCP + targetPort: 9095 + port: 32502 + nodePort: 32502 + type: NodePort +--- +# Source: kafkaset/charts/kafka/templates/6.outside.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker + namespace: dsk-middle +spec: + type: NodePort + ports: + - port: 9094 + name: kafka + protocol: TCP + 
targetPort: 9094 + nodePort: 30094 + selector: + app: kafka +--- +# Source: kafkaset/charts/kafka/templates/6.outside.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker-global + namespace: dsk-middle +spec: + type: NodePort + ports: + - port: 9095 + name: kafka + protocol: TCP + targetPort: 9095 + nodePort: 30095 + selector: + app: kafka +--- +# Source: kafkaset/charts/zookeeper/templates/1.service-leader-election.yaml +apiVersion: v1 +kind: Service +metadata: + name: zookeeper-headless + namespace: dsk-middle +spec: + ports: + - port: 2888 + name: peer + - port: 3888 + name: leader-election + clusterIP: None + selector: + app: zookeeper + storage: persistent +--- +# Source: kafkaset/charts/zookeeper/templates/2.service-client.yaml +# the headless service is for PetSet DNS, this one is for clients +apiVersion: v1 +kind: Service +metadata: + name: zookeeper + namespace: dsk-middle +spec: + ports: + - port: 2181 + name: client + selector: + app: zookeeper +--- +# Source: kafkaset/charts/akhq/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-akhq + labels: + app.kubernetes.io/name: akhq + helm.sh/chart: akhq-0.2.7 + app.kubernetes.io/instance: test + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: akhq + app.kubernetes.io/instance: test + template: + metadata: + annotations: + checksum/config: 00490bc3c20c1a8c6ab1b49540d63065ad39aae5b19766fc0a884db2c0b5ecbf + checksum/secrets: 235bfd9fa6c8713d840dc969c1c05fd1b82c200a02bd4187955d14a983effe58 + labels: + app.kubernetes.io/name: akhq + app.kubernetes.io/instance: test + spec: + serviceAccountName: default + containers: + - name: akhq + image: "tchiotludo/akhq:0.20.0" + imagePullPolicy: Always + env: + - name: MICRONAUT_ENVIRONMENTS + value: secrets + - name: MICRONAUT_CONFIG_FILES + value: /app/application.yml,/app/application-secrets.yml + volumeMounts: + - name: config + mountPath: /app/application.yml + subPath: application.yml + - name: secrets + mountPath: /app/application-secrets.yml + subPath: application-secrets.yml + ports: + - name: http + containerPort: 8080 + protocol: TCP + - name: management + containerPort: 28081 + protocol: TCP + livenessProbe: + tcpSocket: + port: management + readinessProbe: + httpGet: + path: /health + port: management + resources: + {} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-kafka + tolerations: + - key: dev/data-kafka + operator: Exists + volumes: + - name: config + configMap: + name: test-akhq + - name: secrets + secret: + secretName: test-akhq-secrets +--- +# Source: kafkaset/charts/kafka/templates/5.kafka.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka + namespace: dsk-middle +spec: + selector: + matchLabels: + app: kafka + serviceName: "kafka-headless" + replicas: 3 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: kafka + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: datasaker/group + operator: In + values: + - data-kafka + weight: 100 + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - zookeeper + topologyKey: kubernetes.io/hostname + weight: 50 + podAntiAffinity: + 
preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - kafka + topologyKey: kubernetes.io/hostname + weight: 50 + tolerations: + - key: dev/data-kafka + operator: Exists + + terminationGracePeriodSeconds: 30 + initContainers: + - name: init-config + image: datasaker/kafka-initutils:v1.0.0 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: extensions + mountPath: /opt/kafka/libs/extensions + containers: + - name: broker + image: datasaker/kafka:v1.0.1 + env: + - name: CLASSPATH + value: /opt/kafka/libs/extensions/* + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + - name: JMX_PORT + value: "5555" + - name: KAFKA_OPTS + value: -javaagent:/opt/kafka/jmx_prometheus_javaagent-0.15.0.jar=9010:/opt/kafka/config.yaml + ports: + - name: inside + containerPort: 9092 + - name: outside + containerPort: 9094 + - name: global + containerPort: 9095 + - name: jmx + containerPort: 9010 + command: + - ./bin/kafka-server-start.sh + - /etc/kafka/server.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] + resources: + requests: + cpu: 500m + memory: 6000Mi + limits: + # This limit was intentionally set low as a reminder that + # the entire Yolean/kubernetes-kafka is meant to be tweaked + # before you run production workloads + cpu: 1000m + memory: 10000Mi + readinessProbe: + tcpSocket: + port: 9092 + timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/kafka/data + - name: extensions + mountPath: /opt/kafka/libs/extensions + volumes: + - name: configmap + configMap: + name: broker-config + - name: config + emptyDir: {} + - name: extensions + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: + resources: + requests: + storage: 50Gi +--- +# Source: kafkaset/charts/zookeeper/templates/4.statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: zookeeper + namespace: dsk-middle +spec: + selector: + matchLabels: + app: zookeeper + storage: persistent + serviceName: "zookeeper-headless" + replicas: 3 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: zookeeper + storage: persistent + annotations: + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-kafka + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - zookeeper + topologyKey: kubernetes.io/hostname + weight: 50 + tolerations: + - key: dev/data-kafka + operator: Exists + + terminationGracePeriodSeconds: 10 + initContainers: + - name: init-config + image: datasaker/kafka-initutils:v1.0.0 + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + 
mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper + containers: + - name: zookeeper + image: datasaker/kafka:v1.0.0 + env: + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + command: + - ./bin/zookeeper-server-start.sh + - /etc/kafka/zookeeper.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] + ports: + - containerPort: 2181 + name: client + - containerPort: 2888 + name: peer + - containerPort: 3888 + name: leader-election + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 200m + memory: 1000Mi + readinessProbe: + exec: + command: ['/bin/bash', '-c', 'echo "ruok" | nc -w 2 localhost 2181 | grep imok'] + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper + volumes: + - name: configmap + configMap: + name: zookeeper-config + - name: config + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: + resources: + requests: + storage: 30Gi +--- +# Source: kafkaset/charts/kafka/templates/2.dns.yaml +# A headless service to create DNS records diff --git a/ansible/roles/helm_install/files/kafka/values.yaml b/ansible/roles/helm_install/files/kafka/values.yaml new file mode 100644 index 0000000..4856d73 --- /dev/null +++ b/ansible/roles/helm_install/files/kafka/values.yaml @@ -0,0 +1,199 @@ +# Default values for sample. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +#service: +# type: ClusterIP +# port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {"datasaker/group": "data"} + +tolerations: [] + +affinity: {} + +global: + KAFKA_BROKER_CONFIG: "{{index .metadata.labels \"failure-domain.beta.kubernetes.io/zone\"}}" + + # KAFK_INITUTILS_VERSION: v1.0.0 + # KAFKA_VERSION: v1.0.1 + + # Registry parameterization (in preparation for public cloud / adjust the values below as needed) + # IMXC_REGISTRY: icn.ocir.io/cntxl7bbdp4p + # StorageClassName: openebs-hostpath + + +# Explicitly list the Kafka node ports +kafka: + image: + repository: datasaker/kafka + tag: v1.0.1 + initContainers: + image: + repository: datasaker/kafka-initutils + tag: v1.0.0 + service: + kafka_outside_0: "32400" + kafka_outside_1: "32401" + kafka_outside_2: "32402" + kafka_global_0: "32500" + kafka_global_1: "32501" + kafka_global_2: "32502" + kafka_broker: "30094" + kafka_broker_global: "30095" + + tolerations: + - key: "dev/data-kafka" + operator: "Exists" + + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: "datasaker/group" + operator: In + values: + - "data-kafka" + weight: 100 + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - kafka + topologyKey: "kubernetes.io/hostname" + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - zookeeper + topologyKey: "kubernetes.io/hostname" + +zookeeper: + image: + repository: datasaker/kafka + tag: v1.0.0 + initContainers: + image: + repository: datasaker/kafka-initutils + tag: v1.0.0 + + tolerations: + - key: "dev/data-kafka" + operator: "Exists" + + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - zookeeper + topologyKey: "kubernetes.io/hostname" + + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-kafka + +akhq: + service: + enabled: true + type: NodePort + port: 80 + nodePort: 32551 + + secrets: + akhq: + connections: + my-cluster-plain-text: + properties: + bootstrap.servers: "kafka:9092" + tolerations: + - key: "dev/data-kafka" + operator: "Exists" + + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-kafka + +# nodeSelector: {"datasaker/group": "data-kafka"} diff --git a/ansible/roles/helm_install/files/keycloak/Chart.lock b/ansible/roles/helm_install/files/keycloak/Chart.lock new file mode 100644 index 0000000..388323e --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/Chart.lock @@ -0,0 +1,9 @@ +dependencies: +- name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 11.1.22 +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.13.0 +digest: sha256:b60a9ec850facc2ea59c40c0a160050f7720192fd683ad84bfa75442e151e2d6 +generated: "2022-04-21T13:59:00.123662927Z" diff --git a/ansible/roles/helm_install/files/keycloak/Chart.yaml b/ansible/roles/helm_install/files/keycloak/Chart.yaml new file mode 100644 index 0000000..2888b93 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/Chart.yaml @@ -0,0 +1,30 @@ 
+annotations: + category: DeveloperTools +apiVersion: v2 +appVersion: 16.1.1 +dependencies: +- condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 11.x.x +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Keycloak is a high performance Java-based identity and access management + solution. It lets developers add an authentication layer to their applications with + minimum effort. +home: https://www.keycloak.org +icon: https://bitnami.com/assets/stacks/keycloak/img/keycloak-stack-220x234.png +keywords: +- keycloak +- access-management +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: keycloak +sources: +- https://github.com/bitnami/bitnami-docker-keycloak +- https://github.com/keycloak/keycloak +version: 7.1.17 diff --git a/ansible/roles/helm_install/files/keycloak/README.md b/ansible/roles/helm_install/files/keycloak/README.md new file mode 100644 index 0000000..4b520e6 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/README.md @@ -0,0 +1,443 @@ + + +# Keycloak packaged by Bitnami + +Keycloak is a high performance Java-based identity and access management solution. It lets developers add an authentication layer to their applications with minimum effort. + +[Overview of Keycloak](https://www.keycloak.org/) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console + helm repo add bitnami https://charts.bitnami.com/bitnami + helm install my-release bitnami/keycloak +``` + +## Introduction + +Bitnami charts for Helm are carefully engineered, actively maintained and are the quickest and easiest way to deploy containers on a Kubernetes cluster that are ready to handle production workloads. + +This chart bootstraps a [Keycloak](https://github.com/bitnami/bitnami-docker-keycloak) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/keycloak +``` + +These commands deploy a Keycloak application on the Kubernetes cluster in the default configuration. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
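One practical note on the uninstall command above: `helm delete` leaves behind any PersistentVolumeClaims created through StatefulSet `volumeClaimTemplates` (for this chart, the bundled PostgreSQL subchart's data volume). If the data should go too, the claims have to be deleted separately; a hedged sketch, assuming the release's PVCs carry the usual `app.kubernetes.io/instance` label:

```bash
# Inspect what the release left behind (the label selector is an assumption;
# adjust it to whatever labels your PVCs actually carry).
kubectl get pvc -l app.kubernetes.io/instance=my-release

# Remove the claims once the data is confirmed disposable.
kubectl delete pvc -l app.kubernetes.io/instance=my-release
```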
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `nameOverride` | String to partially override keycloak.fullname | `""` | +| `fullnameOverride` | String to fully override keycloak.fullname | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + + +### Keycloak parameters + +| Name | Description | Value | +| --------------------------------- | --------------------------------------------------------------------------------------------- | ---------------------- | +| `image.registry` | Keycloak image registry | `docker.io` | +| `image.repository` | Keycloak image repository | `bitnami/keycloak` | +| `image.tag` | Keycloak image tag (immutable tags are recommended) | `16.1.1-debian-10-r36` | +| `image.pullPolicy` | Keycloak image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `auth.createAdminUser` | Create administrator user on boot | `true` | +| `auth.adminUser` | Keycloak administrator user | `user` | +| `auth.adminPassword` | Keycloak administrator password for the new user | `""` | +| `auth.managementUser` | Wildfly management user | `manager` | +| `auth.managementPassword` | Wildfly management password | `""` | +| `auth.existingSecret` | An already existing secret containing auth info | `""` | +| `auth.existingSecretPerPassword` | Override `existingSecret` and other secret values | `{}` | +| `auth.tls.enabled` | Enable TLS encryption | `false` | +| `auth.tls.autoGenerated` | Automatically generate self-signed TLS certificates. Currently only supports PEM certificates | `false` | +| `auth.tls.existingSecret` | Existing secret containing the TLS certificates per Keycloak replica | `""` | +| `auth.tls.usePem` | Use PEM certificates as input instead of PKCS12/JKS stores | `false` | +| `auth.tls.truststoreFilename` | Truststore specific filename inside the existing secret | `""` | +| `auth.tls.keystoreFilename` | Keystore specific filename inside the existing secret | `""` | +| `auth.tls.jksSecret` | DEPRECATED. 
Use `auth.tls.existingSecret` instead | `""` | +| `auth.tls.keystorePassword` | Password to access the keystore when it's password-protected | `""` | +| `auth.tls.truststorePassword` | Password to access the truststore when it's password-protected | `""` | +| `auth.tls.resources.limits` | The resources limits for the TLS init container | `{}` | +| `auth.tls.resources.requests` | The requested resources for the TLS init container | `{}` | +| `proxyAddressForwarding` | Enable Proxy Address Forwarding | `false` | +| `serviceDiscovery.enabled` | Enable Service Discovery for Keycloak (required if `replicaCount` > `1`) | `false` | +| `serviceDiscovery.protocol` | Sets the protocol that Keycloak nodes would use to discover new peers | `kubernetes.KUBE_PING` | +| `serviceDiscovery.properties` | Properties for the discovery protocol set in `serviceDiscovery.protocol` parameter | `[]` | +| `serviceDiscovery.transportStack` | Transport stack for the discovery protocol set in `serviceDiscovery.protocol` parameter | `tcp` | +| `cache.ownersCount` | Number of nodes that will replicate cached data | `1` | +| `cache.authOwnersCount` | Number of nodes that will replicate cached authentication data | `1` | +| `configuration` | Keycloak Configuration. Auto-generated based on other parameters when not specified | `""` | +| `existingConfigmap` | Name of existing ConfigMap with Keycloak configuration | `""` | +| `extraStartupArgs` | Extra default startup args | `""` | +| `initdbScripts` | Dictionary of initdb scripts | `{}` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `extraEnvVars` | Extra environment variables to be set on Keycloak container | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars | `""` | + + +### Keycloak statefulset parameters + +| Name | Description | Value | +| --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------- | +| `replicaCount` | Number of Keycloak replicas to deploy | `1` | +| `containerPorts.http` | Keycloak HTTP container port | `8080` | +| `containerPorts.https` | Keycloak HTTPS container port | `8443` | +| `containerPorts.management` | Keycloak management HTTP container port | `9990` | +| `podSecurityContext.enabled` | Enabled Keycloak pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Set Keycloak pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled Keycloak containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set Keycloak container's Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set Keycloak container's Security Context runAsNonRoot | `true` | +| `resources.limits` | The resources limits for the Keycloak containers | `{}` | +| `resources.requests` | The requested resources for the Keycloak containers | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe on Keycloak containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `300` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `1` | +| 
`livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe on Keycloak containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe on Keycloak containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `5` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `60` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom Liveness probes for Keycloak | `{}` | +| `customReadinessProbe` | Custom Readiness probes for Keycloak | `{}` | +| `customStartupProbe` | Custom Startup probes for Keycloak | `{}` | +| `lifecycleHooks` | LifecycleHooks to set additional configuration at startup | `{}` | +| `hostAliases` | Deployment pod host aliases | `[]` | +| `podLabels` | Extra labels for Keycloak pods | `{}` | +| `podAnnotations` | Annotations for Keycloak pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `{}` | +| `podManagementPolicy` | Pod management policy for the Keycloak statefulset | `Parallel` | +| `priorityClassName` | Keycloak pods' Priority Class Name | `""` | +| `schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `terminationGracePeriodSeconds` | Seconds Keycloak pod needs to terminate gracefully | `""` | +| `updateStrategy.type` | Keycloak statefulset strategy type | `RollingUpdate` | +| `updateStrategy.rollingUpdate` | Keycloak statefulset rolling update configuration parameters | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for Keycloak pods | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for Keycloak container(s) | `[]` | +| `initContainers` | Add additional init containers to the Keycloak pods | `[]` | +| `sidecars` | Add additional sidecar containers to the Keycloak pods | `[]` | + + +### Exposure parameters + +| Name | Description | Value | +| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Kubernetes service type | `LoadBalancer` | +| `service.ports.http` | Keycloak service HTTP port | `80` | +| `service.ports.https` | Keycloak service HTTPS port | `443` | +| `service.nodePorts` | Specify the nodePort values for the LoadBalancer and NodePort service types. | `{}` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.clusterIP` | Keycloak service clusterIP | `""` | +| `service.loadBalancerIP` | loadBalancerIP for the Keycloak Service (optional, cloud specific) | `""` | +| `service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `service.annotations` | Additional custom annotations for Keycloak service | `{}` | +| `service.extraPorts` | Extra port to expose on Keycloak service | `[]` | +| `ingress.enabled` | Enable ingress record generation for Keycloak | `false` | +| `ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `ingress.hostname` | Default host for the ingress record | `keycloak.local` | +| `ingress.path` | Default path for the ingress record | `/` | +| `ingress.servicePort` | Backend service port to use | `http` | +| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` | +| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` | +| `ingress.extraPaths` | Any additional arbitrary paths that may need to be added to the ingress under the main host. | `[]` | +| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. 
| `[]` | +| `ingress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` | +| `networkPolicy.enabled` | Enable the default NetworkPolicy policy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.additionalRules` | Additional NetworkPolicy rules | `{}` | + + +### RBAC parameter + +| Name | Description | Value | +| --------------------------------------------- | --------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable the creation of a ServiceAccount for Keycloak pods | `true` | +| `serviceAccount.name` | Name of the created ServiceAccount | `""` | +| `serviceAccount.automountServiceAccountToken` | Auto-mount the service account token in the pod | `false` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Whether to create and use RBAC resources or not | `false` | +| `rbac.rules` | Custom RBAC rules | `[]` | + + +### Other parameters + +| Name | Description | Value | +| -------------------------- | -------------------------------------------------------------- | ------- | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | +| `autoscaling.enabled` | Enable autoscaling for Keycloak | `false` | +| `autoscaling.minReplicas` | Minimum number of Keycloak replicas | `1` | +| `autoscaling.maxReplicas` | Maximum number of Keycloak replicas | `11` | +| `autoscaling.targetCPU` | Target CPU utilization percentage | `""` | +| `autoscaling.targetMemory` | Target Memory utilization percentage | `""` | + + +### Metrics parameters + +| Name | Description | Value | +| ------------------------------------------ | ------------------------------------------------------------------------------------- | ------- | +| `metrics.enabled` | Enable exposing Keycloak statistics | `false` | +| `metrics.service.ports.http` | Metrics service HTTP port | `9990` | +| `metrics.service.annotations` | Annotations for enabling prometheus to access the metrics endpoints | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. 
| `""` | + + +### keycloak-config-cli parameters + +| Name | Description | Value | +| --------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ----------------------------- | +| `keycloakConfigCli.enabled` | Whether to enable keycloak-config-cli job | `false` | +| `keycloakConfigCli.image.registry` | keycloak-config-cli container image registry | `docker.io` | +| `keycloakConfigCli.image.repository` | keycloak-config-cli container image repository | `bitnami/keycloak-config-cli` | +| `keycloakConfigCli.image.tag` | keycloak-config-cli container image tag | `4.7.0-debian-10-r14` | +| `keycloakConfigCli.image.pullPolicy` | keycloak-config-cli container image pull policy | `IfNotPresent` | +| `keycloakConfigCli.image.pullSecrets` | keycloak-config-cli container image pull secrets | `[]` | +| `keycloakConfigCli.annotations` | Annotations for keycloak-config-cli job | `{}` | +| `keycloakConfigCli.command` | Command for running the container (set to default if not set). Use array form | `[]` | +| `keycloakConfigCli.args` | Args for running the container (set to default if not set). Use array form | `[]` | +| `keycloakConfigCli.hostAliases` | Job pod host aliases | `[]` | +| `keycloakConfigCli.resources.limits` | The resources limits for the keycloak-config-cli container | `{}` | +| `keycloakConfigCli.resources.requests` | The requested resources for the keycloak-config-cli container | `{}` | +| `keycloakConfigCli.containerSecurityContext.enabled` | Enabled keycloak-config-cli containers' Security Context | `true` | +| `keycloakConfigCli.containerSecurityContext.runAsUser` | Set keycloak-config-cli container's Security Context runAsUser | `1001` | +| `keycloakConfigCli.containerSecurityContext.runAsNonRoot` | Set keycloak-config-cli container's Security Context runAsNonRoot | `true` | +| `keycloakConfigCli.podSecurityContext.enabled` | Enabled keycloak-config-cli pods' Security Context | `true` | +| `keycloakConfigCli.podSecurityContext.fsGroup` | Set keycloak-config-cli pod's Security Context fsGroup | `1001` | +| `keycloakConfigCli.backoffLimit` | Number of retries before considering a Job as failed | `1` | +| `keycloakConfigCli.podLabels` | Pod extra labels | `{}` | +| `keycloakConfigCli.podAnnotations` | Annotations for job pod | `{}` | +| `keycloakConfigCli.extraEnvVars` | Additional environment variables to set | `[]` | +| `keycloakConfigCli.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `keycloakConfigCli.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `keycloakConfigCli.extraVolumes` | Extra volumes to add to the job | `[]` | +| `keycloakConfigCli.extraVolumeMounts` | Extra volume mounts to add to the container | `[]` | +| `keycloakConfigCli.configuration` | keycloak-config-cli realms configuration | `{}` | +| `keycloakConfigCli.existingConfigmap` | ConfigMap with keycloak-config-cli configuration. 
+
+
+### Database parameters
+
+| Name                                         | Description                                                                  | Value              |
+| -------------------------------------------- | ------------------------------------------------------------------------------ | ------------------ |
+| `postgresql.enabled`                         | Switch to enable or disable the PostgreSQL helm chart                        | `true`             |
+| `postgresql.auth.username`                   | Name for a custom user to create                                             | `bn_keycloak`      |
+| `postgresql.auth.password`                   | Password for the custom user to create                                       | `""`               |
+| `postgresql.auth.database`                   | Name for a custom database to create                                         | `bitnami_keycloak` |
+| `postgresql.auth.existingSecret`             | Name of existing secret to use for PostgreSQL credentials                    | `""`               |
+| `postgresql.architecture`                    | PostgreSQL architecture (`standalone` or `replication`)                      | `standalone`       |
+| `externalDatabase.host`                      | Database host                                                                | `""`               |
+| `externalDatabase.port`                      | Database port number                                                         | `5432`             |
+| `externalDatabase.user`                      | Non-root username for Keycloak                                               | `bn_keycloak`      |
+| `externalDatabase.password`                  | Password for the non-root username for Keycloak                              | `""`               |
+| `externalDatabase.database`                  | Keycloak database name                                                       | `bitnami_keycloak` |
+| `externalDatabase.existingSecret`            | Name of an existing secret resource containing the database credentials      | `""`               |
+| `externalDatabase.existingSecretPasswordKey` | Name of the key in the existing secret that contains the database password   | `""`               |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install my-release --set auth.adminPassword=secretpassword bitnami/keycloak
+```
+
+The above command sets the Keycloak administrator password to `secretpassword`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install my-release -f values.yaml bitnami/keycloak
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+Keycloak realms, users and clients can be created from the Keycloak administration panel. Refer to the [tutorial on adding user authentication to applications with Keycloak](https://docs.bitnami.com/tutorials/integrate-keycloak-authentication-kubernetes) for more details on these operations.
+
+## Configuration and installation details
+
+### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Use an external database
+
+Sometimes, you may want to have Keycloak connect to an external database rather than a database within your cluster - for example, when using a managed database service, or when running a single database server for all your applications. To do this, set the `postgresql.enabled` parameter to `false` and specify the credentials for the external database using the `externalDatabase.*` parameters.
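+
+For instance, a sketch of the relevant values (the host and secret name are hypothetical):
+
+```yaml
+postgresql:
+  enabled: false
+externalDatabase:
+  host: postgres.example.com           # hypothetical host
+  port: 5432
+  user: bn_keycloak
+  database: bitnami_keycloak
+  existingSecret: keycloak-db-secret   # hypothetical secret containing the password
+```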
+
+Refer to the [chart documentation on using an external database](https://docs.bitnami.com/kubernetes/apps/keycloak/configuration/use-external-database) for more details and an example.
+
+### Add extra environment variables
+
+To add extra environment variables (useful for advanced operations like custom init scripts), use the `extraEnvVars` property.
+
+```yaml
+extraEnvVars:
+  - name: KEYCLOAK_LOG_LEVEL
+    value: DEBUG
+```
+
+Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values.
+
+### Use Sidecars and Init Containers
+
+If additional containers are needed in the same pod (such as additional metrics or logging exporters), they can be defined using the `sidecars` config parameter. Similarly, extra init containers can be added using the `initContainers` parameter.
+
+Refer to the chart documentation for more information on, and examples of, configuring and using [sidecars and init containers](https://docs.bitnami.com/kubernetes/apps/keycloak/configuration/configure-sidecar-init-containers/).
+
+### Initialize a fresh instance
+
+The [Bitnami Keycloak](https://github.com/bitnami/bitnami-docker-keycloak) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, you can specify custom scripts using the `initdbScripts` parameter as a dict.
+
+In addition to this option, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the previous option.
+
+The only allowed extension is `.sh`.
+
+### Deploy extra resources
+
+There are cases where you may want to deploy extra objects, such as a ConfigMap containing your app's configuration or an extra deployment with a microservice used by your app. To cover this case, the chart allows you to add the full specification of other objects using the `extraDeploy` parameter.
+
+### Set Pod affinity
+
+This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available in the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
+
+### Configure Ingress
+
+This chart provides support for Ingress resources. If you have an ingress controller installed on your cluster, such as [nginx-ingress-controller](https://github.com/bitnami/charts/tree/master/bitnami/nginx-ingress-controller) or [contour](https://github.com/bitnami/charts/tree/master/bitnami/contour), you can use the ingress controller to serve your application.
+
+To enable Ingress integration, set `ingress.enabled` to `true`. The `ingress.hostname` property can be used to set the host name. The `ingress.tls` parameter can be used to add the TLS configuration for this host. It is also possible to have more than one host, with a separate TLS configuration for each host.
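+
+A minimal sketch of the relevant values (the hostname is illustrative):
+
+```yaml
+ingress:
+  enabled: true
+  hostname: keycloak.example.com
+  tls: true
+```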
+
+[Learn more about configuring and using Ingress](https://docs.bitnami.com/kubernetes/apps/keycloak/configuration/configure-ingress/).
+
+### Configure TLS Secrets for use with Ingress
+
+The chart also facilitates the creation of TLS secrets for use with the Ingress controller, with different options for certificate management. [Learn more about TLS secrets](https://docs.bitnami.com/kubernetes/apps/keycloak/administration/enable-tls-ingress/).
+
+### Use with ingress offloading SSL
+
+If your ingress controller performs SSL termination, set `proxyAddressForwarding` to `true`, or add the following environment variables via `extraEnvVars`:
+
+```yaml
+- name: KEYCLOAK_PROXY_ADDRESS_FORWARDING
+  value: "true"
+- name: KEYCLOAK_FRONTEND_URL
+  value: "https://keycloak.xxx"
+```
+
+### Manage secrets and passwords
+
+This chart provides several ways to manage passwords:
+
+* Values passed to the chart
+* An existing secret with all the passwords (via the `existingSecret` parameter)
+* Multiple existing secrets with all the passwords (via the `existingSecretPerPassword` parameter)
+
+Refer to the [chart documentation on managing passwords](https://docs.bitnami.com/kubernetes/apps/keycloak/configuration/manage-passwords/) for examples of each method.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+Refer to the [chart documentation for more information about how to upgrade from previous releases](https://docs.bitnami.com/kubernetes/apps/keycloak/administration/upgrade/).
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/.helmignore b/ansible/roles/helm_install/files/keycloak/charts/common/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/Chart.yaml b/ansible/roles/helm_install/files/keycloak/charts/common/Chart.yaml
new file mode 100644
index 0000000..2c93878
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+  category: Infrastructure
+apiVersion: v2
+appVersion: 1.13.0
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/master/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+name: common
+sources:
+- https://github.com/bitnami/charts
+- https://www.bitnami.com/
+type: library
+version: 1.13.0
diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/README.md b/ansible/roles/helm_install/files/keycloak/charts/common/README.md
new file mode 100644
index 0000000..c090f74
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/common/README.md
@@ -0,0 +1,347 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+The following table lists the helpers available in the library, scoped by section.
+
+### Affinities
+
+| Helper identifier              | Description                                           | Expected Input                                 |
+|--------------------------------|-------------------------------------------------------|------------------------------------------------|
+| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition                 | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition                 | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.pods.soft`  | Return a soft podAffinity/podAntiAffinity definition  | `dict "component" "FOO" "context" $`           |
+| `common.affinities.pods.hard`  | Return a hard podAffinity/podAntiAffinity definition  | `dict "component" "FOO" "context" $`           |
+
+### Capabilities
+
+| Helper identifier                              | Description                                                                                      | Expected Input    |
+|------------------------------------------------|--------------------------------------------------------------------------------------------------|-------------------|
+| `common.capabilities.kubeVersion`              | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set).  | `.` Chart context |
+| `common.capabilities.cronjob.apiVersion`       | Return the appropriate apiVersion for cronjob.                                                   | `.` Chart context |
+| `common.capabilities.deployment.apiVersion`    | Return the appropriate apiVersion for deployment.                                                | `.` Chart context |
+| `common.capabilities.statefulset.apiVersion`   | Return the appropriate apiVersion for statefulset.                                               | `.` Chart context |
+| `common.capabilities.ingress.apiVersion`       | Return the appropriate apiVersion for ingress.                                                   | `.` Chart context |
+| `common.capabilities.rbac.apiVersion`          | Return the appropriate apiVersion for RBAC resources.
| `.` Chart context |
+| `common.capabilities.crd.apiVersion`           | Return the appropriate apiVersion for CRDs.                                                      | `.` Chart context |
+| `common.capabilities.policy.apiVersion`        | Return the appropriate apiVersion for podsecuritypolicy.                                         | `.` Chart context |
+| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy.                                             | `.` Chart context |
+| `common.capabilities.apiService.apiVersion`    | Return the appropriate apiVersion for APIService.                                                | `.` Chart context |
+| `common.capabilities.supportsHelmVersion`      | Returns true if the used Helm version is 3.3+                                                    | `.` Chart context |
+
+### Errors
+
+| Helper identifier                       | Description                                                                                                                                                              | Expected Input                                                                      |
+|-----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------|
+| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action.  | `dict "validationErrors" (list $validationError00 $validationError01) "context" $`  |
+
+### Images
+
+| Helper identifier                 | Description                                                                                                      | Expected Input                                                                                             |
+|-----------------------------------|---------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------|
+| `common.images.image`             | Return the proper and full image name                                                                            | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure.   |
+| `common.images.pullSecrets`       | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead)   | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global`     |
+| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates)                             | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $`                 |
+
+### Ingress
+
+| Helper identifier                         | Description                                                                                                         | Expected Input                                                                                                                                                                     |
+|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.ingress.backend`                  | Generate a proper Ingress backend entry depending on the API version                                                | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences  |
+| `common.ingress.supportsPathType`         | Prints "true" if the pathType field is supported                                                                    | `.` Chart context                                                                                                                                                                  |
+| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported                                                            | `.` Chart context                                                                                                                                                                  |
+| `common.ingress.certManagerRequest`       | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations   | `dict "annotations" .Values.path.to.the.ingress.annotations`                                                                                                                       |
+
+### Labels
+
+| Helper identifier           | Description                                                                   | Expected Input    |
+|-----------------------------|--------------------------------------------------------------------------------|-------------------|
+| `common.labels.standard`    | Return Kubernetes standard labels                                             | `.` Chart context |
+| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector`   | `.` Chart context |
+
+### Names
+
+| Helper identifier        | Description                                                  | Expected Input    |
+|--------------------------|---------------------------------------------------------------|-------------------|
+| `common.names.name`      | Expand the name of the chart or use `.Values.nameOverride`   | `.` Chart context |
+| `common.names.fullname`  | Create a default fully qualified app name.                   | `.` Chart context |
+| `common.names.namespace` | Allow the release namespace to be overridden                 | `.` Chart context |
+| `common.names.chart`     | Chart name plus version                                      | `.` Chart context |
+
+### Secrets
+
+| Helper identifier                 | Description                                                    | Expected Input                                                                                                                                                                                                                       |
+|-----------------------------------|------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.secrets.name`             | Generate the name of the secret.                               | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure.                                                                       |
+| `common.secrets.key`              | Generate secret key.                                           | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure.                                                                                                  |
+| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created.   | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`; the length, strong and chartName fields are optional.  |
+| `common.secrets.exists`           | Returns whether a previously generated secret already exists.  | `dict "secret" "secret-name" "context" $`                                                                                                                                                                                            |
+
+### Storage
+
+| Helper identifier      | Description                       | Expected Input                                                                                                         |
+|------------------------|------------------------------------|--------------------------------------------------------------------------------------------------------------------------|
+| `common.storage.class` | Return the proper Storage Class   | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure.   |
+
+### TplValues
+
+| Helper identifier         | Description                               | Expected Input                                                                                                                                                |
+|---------------------------|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.tplvalues.render` | Renders a value that contains a template  | `dict "value" .Values.path.to.the.Value "context" $`; value is the value that should be rendered as a template, and context is frequently the chart context `$` or `.`  |
+
+### Utils
+
+| Helper identifier              | Description                                        | Expected Input                                                           |
+|--------------------------------|------------------------------------------------------|---------------------------------------------------------------------------|
+| `common.utils.fieldToEnvVar`   | Build environment variable name given a field.     | `dict "field" "my-password"`                                             |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value.
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` |
+| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path                                                    | `dict "key" "path.to.key" "context" $`                          |
+| `common.utils.getKeyFromList`  | Returns the first `.Values` key with a defined value, or the first key in the list if none are defined   | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $`  |
+
+### Validations
+
+| Helper identifier                                | Description                                                                                                                      | Expected Input                                                                                                                                                                                                                                                                  |
+|--------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.validations.values.single.empty`         | Validate that a value is not empty.                                                                                              | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $`; secret, field and subchart are optional. If they are given, the helper will generate instructions on how to get the value. See [ValidateValue](#validatevalue)  |
+| `common.validations.values.multiple.empty`       | Validate that multiple values are not empty. It returns a shared error for all the values.                                       | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue)                                                                                                                                                             |
+| `common.validations.values.mariadb.passwords`    | This helper will ensure the required passwords for MariaDB are not empty. It returns a shared error for all the values.          | `dict "secret" "mariadb-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where the mariadb chart and the helper are used.                                                                                          |
+| `common.validations.values.postgresql.passwords` | This helper will ensure the required passwords for PostgreSQL are not empty. It returns a shared error for all the values.       | `dict "secret" "postgresql-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where the postgresql chart and the helper are used.                                                                                    |
+| `common.validations.values.redis.passwords`      | This helper will ensure the required passwords for Redis™ are not empty. It returns a shared error for all the values.           | `dict "secret" "redis-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where the redis chart and the helper are used.                                                                                              |
+| `common.validations.values.cassandra.passwords`  | This helper will ensure the required passwords for Cassandra are not empty. It returns a shared error for all the values.        | `dict "secret" "cassandra-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where the cassandra chart and the helper are used.                                                                                      |
+| `common.validations.values.mongodb.passwords`    | This helper will ensure the required passwords for MongoDB® are not empty. It returns a shared error for all the values.         | `dict "secret" "mongodb-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where the mongodb chart and the helper are used.                                                                                          |
+
+### Warnings
+
+| Helper identifier            | Description                       | Expected Input                                              |
+|------------------------------|------------------------------------|--------------------------------------------------------------|
+| `common.warnings.rollingTag` | Warning about using rolling tag.  | `ImageRoot` see [ImageRoot](#imageroot) for the structure.  |
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if the image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information in logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their own existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the changes required to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart used `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/_affinities.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..189ea40 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . 
-}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/_capabilities.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..4ec8321 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_capabilities.tpl @@ -0,0 +1,139 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) 
-}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/_errors.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+    {{- $errorString = print $errorString "\n                 Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims."
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/_images.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/_ingress.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..8caf73a --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. 
+
+Usage:
+{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }}
+
+Params:
+  - serviceName - String. Name of an existing service backend
+  - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending on whether it is a string or an integer.
+  - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.ingress.backend" -}}
+{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}}
+{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}}
+serviceName: {{ .serviceName }}
+servicePort: {{ .servicePort }}
+{{- else -}}
+service:
+  name: {{ .serviceName }}
+  port:
+    {{- if typeIs "string" .servicePort }}
+    name: {{ .servicePort }}
+    {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }}
+    number: {{ .servicePort | int }}
+    {{- end }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Print "true" if the API pathType field is supported
+Usage:
+{{ include "common.ingress.supportsPathType" . }}
+*/}}
+{{- define "common.ingress.supportsPathType" -}}
+{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the ingressClassname field is supported
+Usage:
+{{ include "common.ingress.supportsIngressClassname" . }}
+*/}}
+{{- define "common.ingress.supportsIngressClassname" -}}
+{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if cert-manager required annotations for TLS signed
+certificates are set in the Ingress annotations
+Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+Usage:
+{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }}
+*/}}
+{{- define "common.ingress.certManagerRequest" -}}
+{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/_labels.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_labels.tpl
new file mode 100644
index 0000000..252066c
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_labels.tpl
@@ -0,0 +1,18 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Kubernetes standard labels
+*/}}
+{{- define "common.labels.standard" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+helm.sh/chart: {{ include "common.names.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
+*/}}
+{{- define "common.labels.matchLabels" -}}
+app.kubernetes.io/name: {{ include "common.names.name" .
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/_names.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_names.tpl new file mode 100644 index 0000000..c8574d1 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_names.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/_secrets.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..a53fb44 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. 
+ +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/_storage.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/_tplvalues.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/_utils.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..ea083a2 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/_warnings.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_cassandra.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_mariadb.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_mongodb.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a071ea4 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_postgresql.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..164ec0d --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether to evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not.
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_redis.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..5d72959 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_validations.tpl b/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/common/values.yaml b/ansible/roles/helm_install/files/keycloak/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/.helmignore b/ansible/roles/helm_install/files/keycloak/charts/postgresql/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/Chart.lock b/ansible/roles/helm_install/files/keycloak/charts/postgresql/Chart.lock new file mode 100644 index 0000000..7163491 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.13.0 +digest: sha256:b6a17da2d82c85b2f3ad6550d93780de4de63eba4a528ae2d5bc9934c122a856 +generated: "2022-03-26T06:13:30.244559427Z" diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/Chart.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/Chart.yaml new file mode 100644 index 0000000..5301c7d --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/Chart.yaml @@ -0,0 +1,30 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 14.2.0 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.x.x +description: PostgreSQL (Postgres) is an open source object-relational database known + for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, + views, triggers and stored procedures. +home: https://github.com/bitnami/charts/tree/master/bitnami/postgresql +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-220x234.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +- https://www.postgresql.org/ +version: 11.1.22 diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/README.md b/ansible/roles/helm_install/files/keycloak/charts/postgresql/README.md new file mode 100644 index 0000000..9548481 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/README.md @@ -0,0 +1,662 @@ + + +# PostgreSQL packaged by Bitnami + +PostgreSQL (Postgres) is an open source object-relational database known for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, views, triggers and stored procedures. 
+ +[Overview of PostgreSQL](http://www.postgresql.org) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```bash +helm repo add bitnami https://charts.bitnami.com/bitnami +helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha). + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart, except the PVCs, and deletes the release. + +To delete the PVCs associated with `my-release`: + +```bash +kubectl delete pvc -l release=my-release +``` + +> **Note**: Deleting the PVCs will delete the PostgreSQL data as well. Please be cautious before doing it.
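+
+Any parameter in the reference below can be overridden at install time with `--set`, or collected in a values file passed with `-f`. A minimal sketch, assuming placeholder credentials and a placeholder volume size (none of these values are chart defaults):
+
+```bash
+# Hypothetical overrides: auth.* and primary.persistence.size are parameters
+# documented in the tables below; every value here is a placeholder.
+helm install my-release bitnami/postgresql \
+  --set auth.username=myuser \
+  --set auth.password=mypassword \
+  --set auth.database=mydatabase \
+  --set primary.persistence.size=16Gi
+```
+
+The same overrides can be kept in a YAML file and applied with `helm install my-release bitnami/postgresql -f values-override.yaml`, which is easier to review and version.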
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.postgresql.auth.postgresPassword` | Password for the "postgres" admin user (overrides `auth.postgresPassword`) | `""` | +| `global.postgresql.auth.username` | Name for a custom user to create (overrides `auth.username`) | `""` | +| `global.postgresql.auth.password` | Password for the custom user to create (overrides `auth.password`) | `""` | +| `global.postgresql.auth.database` | Name for a custom database to create (overrides `auth.database`) | `""` | +| `global.postgresql.auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`) | `""` | +| `global.postgresql.service.ports.postgresql` | PostgreSQL service port (overrides `service.ports.postgresql`) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release (evaluated as a template) | `[]` | +| `commonLabels` | Add labels to all the deployed resources | `{}` | +| `commonAnnotations` | Add annotations to all the deployed resources | `{}` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + + +### PostgreSQL common parameters + +| Name | Description | Value | +| ------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `image.registry` | PostgreSQL image registry | `docker.io` | +| `image.repository` | PostgreSQL image repository | `bitnami/postgresql` | +| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `14.2.0-debian-10-r58` | +| `image.pullPolicy` | PostgreSQL image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `auth.enablePostgresUser` | Assign a 
password to the "postgres" admin user. Otherwise, remote access will be blocked for this user | `true` | +| `auth.postgresPassword` | Password for the "postgres" admin user. Ignored if `auth.existingSecret` with key `postgres-password` is provided | `""` | +| `auth.username` | Name for a custom user to create | `""` | +| `auth.password` | Password for the custom user to create. Ignored if `auth.existingSecret` with key `password` is provided | `""` | +| `auth.database` | Name for a custom database to create | `""` | +| `auth.replicationUsername` | Name of the replication user | `repl_user` | +| `auth.replicationPassword` | Password for the replication user. Ignored if `auth.existingSecret` with key `replication-password` is provided | `""` | +| `auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials. The secret must contain the keys `postgres-password` (which is the password for "postgres" admin user), `password` (which is the password for the custom user to create when `auth.username` is set) and `replication-password` (which is the password for replication user). `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. | `""` | +| `auth.usePasswordFiles` | Mount credentials as a files instead of using an environment variable | `false` | +| `architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `containerPorts.postgresql` | PostgreSQL container port | `5432` | +| `audit.logHostname` | Log client hostnames | `false` | +| `audit.logConnections` | Add client log-in operations to the log file | `false` | +| `audit.logDisconnections` | Add client log-outs operations to the log file | `false` | +| `audit.pgAuditLog` | Add operations to log using the pgAudit extension | `""` | +| `audit.pgAuditLogCatalog` | Log catalog using pgAudit | `off` | +| `audit.clientMinMessages` | Message log level to share with the user | `error` | +| `audit.logLinePrefix` | Template for log line prefix (default if not set) | `""` | +| `audit.logTimezone` | Timezone for the log timestamps | `""` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn` | `""` | +| `ldap.server` | IP address or name of the LDAP server. 
| `""` | +| `ldap.port` | Port number on the LDAP server to connect to | `""` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `""` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `""` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `""` | +| `ldap.bindDN` | DN of user to bind to LDAP | `""` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `""` | +| `ldap.search_attr` | Attribute to match against the user name in the search | `""` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `""` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS | `""` | +| `ldap.tls` | Set to `1` to use TLS encryption | `""` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql/data` | +| `postgresqlSharedPreloadLibraries` | Shared preload libraries (comma-separated list) | `pgaudit` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) | `true` | +| `shmVolume.sizeLimit` | Set this to enable a size limit on the shm tmpfs | `""` | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `""` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename | `""` | +| `tls.crlFilename` | File containing a Certificate Revocation List | `""` | + + +### PostgreSQL Primary parameters + +| Name | Description | Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- | +| `primary.configuration` | PostgreSQL Primary main configuration to be injected as ConfigMap | `""` | +| `primary.pgHbaConfiguration` | PostgreSQL Primary client authentication configuration | `""` | +| `primary.existingConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary configuration | `""` | +| `primary.extendedConfiguration` | Extended PostgreSQL Primary configuration (appended to main or default configuration) | `""` | +| `primary.existingExtendedConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary extended configuration | `""` | +| `primary.initdb.args` | PostgreSQL initdb extra arguments | `""` | +| `primary.initdb.postgresqlWalDir` | Specify a custom location for the PostgreSQL transaction log | `""` | +| `primary.initdb.scripts` | Dictionary of initdb scripts | `{}` | +| `primary.initdb.scriptsConfigMap` | ConfigMap with scripts to be run at first boot | `""` | +| `primary.initdb.scriptsSecret` | Secret with scripts to be run at first boot (in case it contains sensitive information) | `""` | +| `primary.initdb.user` | Specify the PostgreSQL username to execute the initdb scripts | `""` | +| `primary.initdb.password` | Specify the PostgreSQL password to execute the initdb scripts | `""` | +| `primary.standby.enabled` | Whether to enable current cluster's primary as standby server of another cluster or not | `false` | +| `primary.standby.primaryHost` | The Host of replication primary in the other cluster | `""` | +| `primary.standby.primaryPort` | The Port of replication primary in 
the other cluster | `""` | +| `primary.extraEnvVars` | Array with extra environment variables to add to PostgreSQL Primary nodes | `[]` | +| `primary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes | `""` | +| `primary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL Primary nodes | `""` | +| `primary.command` | Override default container command (useful when using custom images) | `[]` | +| `primary.args` | Override default container args (useful when using custom images) | `[]` | +| `primary.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Primary containers | `true` | +| `primary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `primary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `primary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `primary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `primary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `primary.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Primary containers | `true` | +| `primary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `primary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `primary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `primary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `primary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `primary.startupProbe.enabled` | Enable startupProbe on PostgreSQL Primary containers | `false` | +| `primary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `primary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `primary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `primary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `primary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `primary.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `primary.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `primary.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `primary.lifecycleHooks` | for the PostgreSQL Primary container to automate configuration before or after startup | `{}` | +| `primary.resources.limits` | The resources limits for the PostgreSQL Primary containers | `{}` | +| `primary.resources.requests.memory` | The requested memory for the PostgreSQL Primary containers | `256Mi` | +| `primary.resources.requests.cpu` | The requested cpu for the PostgreSQL Primary containers | `250m` | +| `primary.podSecurityContext.enabled` | Enable security context | `true` | +| `primary.podSecurityContext.fsGroup` | Group ID for the pod | `1001` | +| `primary.containerSecurityContext.enabled` | Enable container security context | `true` | +| `primary.containerSecurityContext.runAsUser` | User ID for the container | `1001` | +| `primary.hostAliases` | PostgreSQL primary pods host aliases | `[]` | +| `primary.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (postgresql primary) | `false` | +| `primary.hostIPC` | Specify if host IPC should be enabled for 
PostgreSQL pod (postgresql primary) | `false` | +| `primary.labels` | Map of labels to add to the statefulset (postgresql primary) | `{}` | +| `primary.annotations` | Annotations for PostgreSQL primary pods | `{}` | +| `primary.podLabels` | Map of labels to add to the pods (postgresql primary) | `{}` | +| `primary.podAnnotations` | Map of annotations to add to the pods (postgresql primary) | `{}` | +| `primary.podAffinityPreset` | PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.podAntiAffinityPreset` | PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `primary.nodeAffinityPreset.type` | PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.nodeAffinityPreset.key` | PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. | `""` | +| `primary.nodeAffinityPreset.values` | PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. | `[]` | +| `primary.affinity` | Affinity for PostgreSQL primary pods assignment | `{}` | +| `primary.nodeSelector` | Node labels for PostgreSQL primary pods assignment | `{}` | +| `primary.tolerations` | Tolerations for PostgreSQL primary pods assignment | `[]` | +| `primary.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `{}` | +| `primary.priorityClassName` | Priority Class to use for each pod (postgresql primary) | `""` | +| `primary.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `primary.terminationGracePeriodSeconds` | Seconds PostgreSQL primary pod needs to terminate gracefully | `""` | +| `primary.updateStrategy.type` | PostgreSQL Primary statefulset strategy type | `RollingUpdate` | +| `primary.updateStrategy.rollingUpdate` | PostgreSQL Primary statefulset rolling update configuration parameters | `{}` | +| `primary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) | `[]` | +| `primary.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) | `[]` | +| `primary.sidecars` | Add additional sidecar containers to the PostgreSQL Primary pod(s) | `[]` | +| `primary.initContainers` | Add additional init containers to the PostgreSQL Primary pod(s) | `[]` | +| `primary.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) | `{}` | +| `primary.service.type` | Kubernetes Service type | `ClusterIP` | +| `primary.service.ports.postgresql` | PostgreSQL service port | `5432` | +| `primary.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` | +| `primary.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `primary.service.annotations` | Annotations for PostgreSQL primary service | `{}` | +| `primary.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` | +| `primary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `primary.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` | +| `primary.service.extraPorts` | Extra ports to expose in the PostgreSQL primary service | `[]` | +| `primary.persistence.enabled` | Enable PostgreSQL Primary data persistence using PVC | 
`true` | +| `primary.persistence.existingClaim` | Name of an existing PVC to use | `""` | +| `primary.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` | +| `primary.persistence.subPath` | The subdirectory of the volume to mount to | `""` | +| `primary.persistence.storageClass` | PVC Storage Class for PostgreSQL Primary data volume | `""` | +| `primary.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` | +| `primary.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `primary.persistence.annotations` | Annotations for the PVC | `{}` | +| `primary.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` | +| `primary.persistence.dataSource` | Custom PVC data source | `{}` | + + +### PostgreSQL read only replica parameters + +| Name | Description | Value | +| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- | +| `readReplicas.replicaCount` | Number of PostgreSQL read only replicas | `1` | +| `readReplicas.extraEnvVars` | Array with extra environment variables to add to PostgreSQL read only nodes | `[]` | +| `readReplicas.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes | `""` | +| `readReplicas.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL read only nodes | `""` | +| `readReplicas.command` | Override default container command (useful when using custom images) | `[]` | +| `readReplicas.args` | Override default container args (useful when using custom images) | `[]` | +| `readReplicas.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL read only containers | `true` | +| `readReplicas.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `readReplicas.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `readReplicas.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `readReplicas.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `readReplicas.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readReplicas.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL read only containers | `true` | +| `readReplicas.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readReplicas.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readReplicas.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readReplicas.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readReplicas.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `readReplicas.startupProbe.enabled` | Enable startupProbe on PostgreSQL read only containers | `false` | +| `readReplicas.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `readReplicas.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `readReplicas.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `readReplicas.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `readReplicas.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| 
`readReplicas.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `readReplicas.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `readReplicas.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `readReplicas.lifecycleHooks` | LifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup | `{}` |
+| `readReplicas.resources.limits` | The resources limits for the PostgreSQL read only containers | `{}` |
+| `readReplicas.resources.requests.memory` | The requested memory for the PostgreSQL read only containers | `256Mi` |
+| `readReplicas.resources.requests.cpu` | The requested cpu for the PostgreSQL read only containers | `250m` |
+| `readReplicas.podSecurityContext.enabled` | Enable security context | `true` |
+| `readReplicas.podSecurityContext.fsGroup` | Group ID for the pod | `1001` |
+| `readReplicas.containerSecurityContext.enabled` | Enable container security context | `true` |
+| `readReplicas.containerSecurityContext.runAsUser` | User ID for the container | `1001` |
+| `readReplicas.hostAliases` | PostgreSQL read only pods host aliases | `[]` |
+| `readReplicas.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) | `false` |
+| `readReplicas.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only) | `false` |
+| `readReplicas.labels` | Map of labels to add to the statefulset (PostgreSQL read only) | `{}` |
+| `readReplicas.annotations` | Annotations for PostgreSQL read only pods | `{}` |
+| `readReplicas.podLabels` | Map of labels to add to the pods (PostgreSQL read only) | `{}` |
+| `readReplicas.podAnnotations` | Map of annotations to add to the pods (PostgreSQL read only) | `{}` |
+| `readReplicas.podAffinityPreset` | PostgreSQL read only pod affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `readReplicas.podAntiAffinityPreset` | PostgreSQL read only pod anti-affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `readReplicas.nodeAffinityPreset.type` | PostgreSQL read only node affinity preset type. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `readReplicas.nodeAffinityPreset.key` | PostgreSQL read only node label key to match. Ignored if `readReplicas.affinity` is set. | `""` |
+| `readReplicas.nodeAffinityPreset.values` | PostgreSQL read only node label values to match. Ignored if `readReplicas.affinity` is set. | `[]` |
+| `readReplicas.affinity` | Affinity for PostgreSQL read only pods assignment | `{}` |
+| `readReplicas.nodeSelector` | Node labels for PostgreSQL read only pods assignment | `{}` |
+| `readReplicas.tolerations` | Tolerations for PostgreSQL read only pods assignment | `[]` |
+| `readReplicas.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `{}` |
+| `readReplicas.priorityClassName` | Priority Class to use for each pod (PostgreSQL read only) | `""` |
+| `readReplicas.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
| `""` | +| `readReplicas.terminationGracePeriodSeconds` | Seconds PostgreSQL read only pod needs to terminate gracefully | `""` | +| `readReplicas.updateStrategy.type` | PostgreSQL read only statefulset strategy type | `RollingUpdate` | +| `readReplicas.updateStrategy.rollingUpdate` | PostgreSQL read only statefulset rolling update configuration parameters | `{}` | +| `readReplicas.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) | `[]` | +| `readReplicas.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.sidecars` | Add additional sidecar containers to the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.initContainers` | Add additional init containers to the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL read only pod(s) | `{}` | +| `readReplicas.service.type` | Kubernetes Service type | `ClusterIP` | +| `readReplicas.service.ports.postgresql` | PostgreSQL service port | `5432` | +| `readReplicas.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` | +| `readReplicas.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `readReplicas.service.annotations` | Annotations for PostgreSQL read only service | `{}` | +| `readReplicas.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` | +| `readReplicas.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `readReplicas.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` | +| `readReplicas.service.extraPorts` | Extra ports to expose in the PostgreSQL read only service | `[]` | +| `readReplicas.persistence.enabled` | Enable PostgreSQL read only data persistence using PVC | `true` | +| `readReplicas.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` | +| `readReplicas.persistence.subPath` | The subdirectory of the volume to mount to | `""` | +| `readReplicas.persistence.storageClass` | PVC Storage Class for PostgreSQL read only data volume | `""` | +| `readReplicas.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` | +| `readReplicas.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `readReplicas.persistence.annotations` | Annotations for the PVC | `{}` | +| `readReplicas.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` | +| `readReplicas.persistence.dataSource` | Custom PVC data source | `{}` | + + +### NetworkPolicy parameters + +| Name | Description | Value | +| ------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `networkPolicy.enabled` | Enable network policies | `false` | +| `networkPolicy.metrics.enabled` | Enable network policies for metrics (prometheus) | `false` | +| `networkPolicy.metrics.namespaceSelector` | Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace. | `{}` | +| `networkPolicy.metrics.podSelector` | Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. 
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. | `false` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed namespace(s). | `{}` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed pod(s). | `{}` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL primary node. | `{}` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. | `false` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed namespace(s). | `{}` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed pod(s). | `{}` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL read-only nodes. | `{}` |
+| `networkPolicy.egressRules.denyConnectionsToExternal` | Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). | `false` |
+| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` |
+
+
+### Volume Permissions parameters
+
+| Name | Description | Value |
+| ---- | ----------- | ----- |
+| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
+| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `10-debian-10-r388` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
+| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
+| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` |
+| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` |
+| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` |
+
+
+### Other Parameters
+
+| Name | Description | Value |
+| ---- | ----------- | ----- |
+| `serviceAccount.create` | Enable creation of ServiceAccount for PostgreSQL pod | `false` |
+| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
| `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | +| `rbac.rules` | Custom RBAC rules to set | `[]` | +| `psp.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` | + + +### Metrics Parameters + +| Name | Description | Value | +| ----------------------------------------------- | ------------------------------------------------------------------------------------- | --------------------------- | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.image.registry` | PostgreSQL Prometheus Exporter image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Prometheus Exporter image repository | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.10.1-debian-10-r76` | +| `metrics.image.pullPolicy` | PostgreSQL Prometheus Exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify image pull secrets | `[]` | +| `metrics.customMetrics` | Define additional custom metrics | `{}` | +| `metrics.extraEnvVars` | Extra environment variables to add to PostgreSQL Prometheus exporter | `[]` | +| `metrics.containerSecurityContext.enabled` | Enable PostgreSQL Prometheus exporter containers' Security Context | `true` | +| `metrics.containerSecurityContext.runAsUser` | Set PostgreSQL Prometheus exporter containers' Security Context runAsUser | `1001` | +| `metrics.containerSecurityContext.runAsNonRoot` | Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Prometheus exporter containers | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Prometheus exporter containers | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.startupProbe.enabled` | Enable startupProbe on PostgreSQL Prometheus exporter containers | `false` | +| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| 
+| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `metrics.containerPorts.metrics` | PostgreSQL Prometheus exporter metrics container port | `9187` |
+| `metrics.resources.limits` | The resources limits for the PostgreSQL Prometheus exporter container | `{}` |
+| `metrics.resources.requests` | The requested resources for the PostgreSQL Prometheus exporter container | `{}` |
+| `metrics.service.ports.metrics` | PostgreSQL Prometheus Exporter service port | `9187` |
+| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` |
+| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` |
+| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` |
+| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` |
+| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` |
+| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` |
+| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` |
+| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
+| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` |
+| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
+| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` |
+| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` |
+| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus. | `""` |
+| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` |
+| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` |
+| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` |
+| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install my-release \
+    --set auth.postgresPassword=secretpassword \
+    bitnami/postgresql
+```
+
+The above command sets the PostgreSQL `postgres` account password to `secretpassword`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart.
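+
+A minimal sketch of such a file, using parameters documented in this README (all values are illustrative):
+
+```yaml
+# values.yaml (sketch)
+auth:
+  postgresPassword: secretpassword
+primary:
+  persistence:
+    size: 8Gi
+```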
+For example,
+
+```bash
+helm install my-release -f values.yaml bitnami/postgresql
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist.
+
+### Customizing primary and read replica services in a replicated configuration
+
+At the top level, there is a service object which defines the services for both primary and readReplicas. For deeper customization, there are service objects for both the primary and read types individually. This allows you to override the values in the top level service object so that the primary and read can be of different service types and with different clusterIPs / nodePorts. Also, if you want the primary and read services to be of type NodePort, you will need to set their nodePorts to different values to prevent a collision. The values that are deeper in the primary.service or readReplicas.service objects will take precedence over the top level service object.
+
+### Use a different PostgreSQL version
+
+To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/configuration/change-image-version/).
+
+### postgresql.conf / pg_hba.conf files as configMap
+
+This Helm chart also supports customizing the PostgreSQL configuration file. You can add additional PostgreSQL configuration parameters using the `primary.extendedConfiguration` parameter as a string. Alternatively, to replace the entire default configuration use `primary.configuration`.
+
+You can also add a custom pg_hba.conf using the `primary.pgHbaConfiguration` parameter.
+
+In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `primary.existingConfigmap` parameter. Note that this will override the two previous options.
+
+### Initialize a fresh instance
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, you can specify custom scripts using the `primary.initdb.scripts` parameter.
+
+In addition, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `primary.initdb.scriptsConfigMap` parameter. Note that this will override the previous option. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `primary.initdb.scriptsSecret` parameter.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
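+
+A sketch of such an inline script in `values.yaml` (assuming the map-of-scripts shape used by recent chart versions; the script name and SQL statement are illustrative):
+
+```yaml
+primary:
+  initdb:
+    scripts:
+      # Runs once on first start of a fresh instance
+      my_init_script.sql: |
+        CREATE TABLE IF NOT EXISTS example (id SERIAL PRIMARY KEY, name TEXT);
+```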
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`
+- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+
+For example:
+
+- First, create the secret with the certificates files:
+
+  ```console
+  kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt
+  ```
+
+- Then, use the following parameters:
+
+  ```console
+  volumePermissions.enabled=true
+  tls.enabled=true
+  tls.certificatesSecret="certificates-tls-secret"
+  tls.certFilename="cert.crt"
+  tls.certKeyFilename="cert.key"
+  ```
+
+  > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an ongoing [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `containerSecurityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected.
+
+### Sidecars
+
+If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+# For the PostgreSQL primary
+primary:
+  sidecars:
+    - name: your-image-name
+      image: your-image
+      imagePullPolicy: Always
+      ports:
+        - name: portname
+          containerPort: 1234
+# For the PostgreSQL replicas
+readReplicas:
+  sidecars:
+    - name: your-image-name
+      image: your-image
+      imagePullPolicy: Always
+      ports:
+        - name: portname
+          containerPort: 1234
+```
+
+### Metrics
+
+The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
+
+The exporter allows you to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details.
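+
+As a sketch of the shape such a custom query can take under `metrics.customMetrics` (the query and metric names are illustrative; see the exporter documentation above for the exact schema):
+
+```yaml
+metrics:
+  customMetrics:
+    pg_database:
+      # One row per database, exported as labelled gauge samples
+      query: "SELECT d.datname AS name, pg_catalog.pg_database_size(d.datname) AS size_bytes FROM pg_catalog.pg_database d"
+      metrics:
+        - name:
+            usage: "LABEL"
+            description: "Name of the database"
+        - size_bytes:
+            usage: "GAUGE"
+            description: "Size of the database in bytes"
+```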
+
+### Use of global variables
+
+In more complex scenarios, we may have the following tree of dependencies
+
+```
+                     +--------------+
+                     |              |
+        +------------+   Chart 1    +-----------+
+        |            |              |           |
+        |            +------+-------+           |
+        |                   |                   |
+        v                   v                   v
++-------+------+    +-------+-------+   +-------+-------+
+|              |    |               |   |               |
+|  PostgreSQL  |    |  Sub-chart 1  |   |  Sub-chart 2  |
+|              |    |               |   |               |
++--------------+    +---------------+   +---------------+
+```
+
+The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be to deploy Chart 1 with the following parameters:
+
+```
+postgresql.auth.username=testuser
+subchart1.postgresql.auth.username=testuser
+subchart2.postgresql.auth.username=testuser
+postgresql.auth.password=testpass
+subchart1.postgresql.auth.password=testpass
+subchart2.postgresql.auth.password=testpass
+postgresql.auth.database=testdb
+subchart1.postgresql.auth.database=testdb
+subchart2.postgresql.auth.database=testdb
+```
+
+If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows:
+
+```
+global.postgresql.auth.username=testuser
+global.postgresql.auth.password=testpass
+global.postgresql.auth.database=testdb
+```
+
+This way, the credentials will be available in all of the subcharts.
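+
+The same globals can equally be set in the parent chart's `values.yaml` rather than on the command line (a sketch; the values are illustrative):
+
+```yaml
+global:
+  postgresql:
+    auth:
+      username: testuser
+      password: testpass
+      database: testdb
+```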
+
+## Persistence
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If the volume already contains data, synchronization of commits to the standby nodes will fail; see the [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556) for details. If you need to keep that data, convert it to SQL and import it after `helm install` has finished.
+
+## NetworkPolicy
+
+To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+```bash
+kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+```
+
+With NetworkPolicy enabled, traffic will be limited to just port 5432.
+
+For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
+This label will be displayed in the output of a successful install.
+
+## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image
+
+- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image.
+- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift.
+- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: `volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,containerSecurityContext.enabled=false,shmVolume.chmod.enabled=false`
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+Refer to the [chart documentation for more information about how to upgrade from previous releases](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/administration/upgrade/).
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/.helmignore b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/Chart.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/Chart.yaml
new file mode 100644
index 0000000..2c93878
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+  category: Infrastructure
+apiVersion: v2
+appVersion: 1.13.0
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/master/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+name: common
+sources:
+- https://github.com/bitnami/charts
+- https://www.bitnami.com/
+type: library
+version: 1.13.0
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/README.md b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/README.md
new file mode 100644
index 0000000..c090f74
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/README.md
@@ -0,0 +1,347 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+The following tables list the helpers available in the library, scoped by section.
+
+### Affinities
+
+| Helper identifier | Description | Expected Input |
+| ----------------- | ----------- | -------------- |
+| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` |
+| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` |
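+
+A sketch of how these helpers are consumed from a chart template (the component name is illustrative):
+
+```yaml
+# templates/statefulset.yaml (fragment)
+affinity:
+  podAntiAffinity: {{- include "common.affinities.pods.soft" (dict "component" "postgresql" "context" $) | nindent 4 }}
+```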
+
+### Capabilities
+
+| Helper identifier | Description | Expected Input |
+| ----------------- | ----------- | -------------- |
+| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context |
+| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context |
+| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context |
+| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context |
+| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context |
+| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context |
+| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context |
+| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context |
+| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context |
+| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context |
+| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context |
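+
+A sketch of the typical use, so a manifest emits the right apiVersion for the cluster it is rendered against:
+
+```yaml
+# templates/deployment.yaml (fragment)
+apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }}
+kind: Deployment
+```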
+
+### Errors
+
+| Helper identifier | Description | Expected Input |
+| ----------------- | ----------- | -------------- |
+| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` |
+
+### Images
+
+| Helper identifier | Description | Expected Input |
+| ----------------- | ----------- | -------------- |
+| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. |
+| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` |
+| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` |
+
+### Ingress
+
+| Helper identifier | Description | Expected Input |
+| ----------------- | ----------- | -------------- |
+| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences |
+| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context |
+| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context |
+| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` |
+
+### Labels
+
+| Helper identifier | Description | Expected Input |
+| ----------------- | ----------- | -------------- |
+| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context |
+| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context |
+
+### Names
+
+| Helper identifier | Description | Expected Input |
+| ----------------- | ----------- | -------------- |
+| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context |
+| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context |
+| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context |
+| `common.names.chart` | Chart name plus version | `.` Chart context |
+
+### Secrets
+
+| Helper identifier | Description | Expected Input |
+| ----------------- | ----------- | -------------- |
+| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, the length, strong and chartName fields are optional. |
+| `common.secrets.exists` | Returns whether a previously generated secret already exists. | `dict "secret" "secret-name" "context" $` |
+
+### Storage
+
+| Helper identifier | Description | Expected Input |
+| ----------------- | ----------- | -------------- |
+| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier | Description | Expected Input |
+| ----------------- | ----------- | -------------- |
+| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, where `value` is the value that should be rendered as a template and `context` is usually the chart context (`$` or `.`) |
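+
+For instance, to render a values entry that may itself contain templates (the `.Values.podAnnotations` path is illustrative):
+
+```yaml
+# templates/statefulset.yaml (fragment)
+annotations: {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 4 }}
+```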
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. 
+
+### Warnings
+
+| Helper identifier | Description | Expected Input |
+| ----------------- | ----------- | -------------- |
+| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. |
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information on logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Ghost data Persistent Volume Storage Class. If set to "-", storageClassName: "" which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, we sometimes want to give users the possibility of using their own existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_affinities.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..189ea40 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include 
"common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..4ec8321 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,139 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. 
+*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_errors.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_errors.tpl new file mode 100644 index 0000000..a79cc2e --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_images.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_ingress.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..8caf73a --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_labels.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_names.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 0000000..c8574d1 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_secrets.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..a53fb44 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. 
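The split between common.labels.standard and common.labels.matchLabels above is deliberate: selector fields are immutable once created, so the match set omits the helm.sh/chart label, whose version suffix changes on every chart upgrade. A sketch of the intended pairing (hypothetical Deployment template, not part of this patch):

    metadata:
      labels: {{- include "common.labels.standard" . | nindent 4 }}
    spec:
      selector:
        matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
      template:
        metadata:
          labels: {{- include "common.labels.standard" . | nindent 10 }}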
+ +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. 
Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
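Because common.secrets.passwords.manage consults the live namespace via lookup before generating anything, a random password created at install time is reused on helm upgrade rather than re-rolled. A sketch of a call site (hypothetical secret template; auth.password is an assumed values key):

    apiVersion: v1
    kind: Secret
    metadata:
      name: {{ include "common.names.fullname" . }}
    type: Opaque
    data:
      password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "password" "providedValues" (list "auth.password") "length" 16 "context" $) }}

The helper returns the value already base64-encoded, which is why it sits under data: rather than stringData:.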
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_storage.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_utils.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..ea083a2 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_warnings.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_cassandra.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_mariadb.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
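As the usage block above indicates, a parent chart typically collects the returned message and feeds it to common.errors.upgrade.passwords.empty so that an upgrade with missing passwords fails fast. Sketch (the secret name is hypothetical):

    {{- $mariadbPasswordErrors := include "common.validations.values.mariadb.passwords" (dict "secret" "my-release-mariadb" "subchart" true "context" $) -}}
    {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $mariadbPasswordErrors) "context" $) -}}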
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_mongodb.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a071ea4 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_postgresql.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..164ec0d --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
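Worth noting from the definitions above: the password key only flips to the global path when global.postgresql.postgresqlUsername is set, so an umbrella chart that defines credentials once under global must set the username there as well. Values sketch (illustrative):

    global:
      postgresql:
        postgresqlUsername: app
        postgresqlPassword: s3cr3t    # resolved ahead of the chart-local key
    postgresql:
      postgresqlPassword: unused-when-global-is-set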
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_redis.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..5d72959 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
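The probe defined just below (common.redis.values.standarized.version) lets the password validation above span both value layouts. Values sketches (illustrative):

    # redis chart >= 14: the auth block is detected and used
    auth:
      enabled: true
      password: ""    # empty trips the redis-password validation
    # older layout: the legacy keys drive the same check
    usePassword: true
    password: ""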
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_validations.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/values.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/extended-config.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/extended-config.yaml new file mode 100644 index 0000000..224168e --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/extended-config.yaml @@ -0,0 +1,4 @@ +primary: + extendedConfiguration: | + pg_stat_statements.max = 10000 + pg_stat_statements.track = all diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/init-scripts.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/init-scripts.yaml new file mode 100644 index 0000000..66ac9bb --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/init-scripts.yaml @@ -0,0 +1,8 @@ +primary: + initdb: + args: --data-checksums + postgresqlWalDir: /bitnami/wal-dir/ + scripts: + my_init_script.sh: | + #!/bin/sh + echo "Success" diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/metrics.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/metrics.yaml new file mode 100644 index 0000000..df26bb2 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/metrics.yaml @@ -0,0 +1,24 @@ +auth: + postgresPassword: adminpassword + username: foo + password: foopassword + database: bar +metrics: + enabled: true + serviceMonitor: + enabled: true + namespace: monitoring + prometheusRule: + enabled: true + namespace: monitoring +networkPolicy: + enabled: true + metrics: + enabled: true + namespaceSelector: + label: monitoring + ingressRules: + primaryAccessOnlyFrom: + enabled: true + podSelector: + "{{ template \"common.names.fullname\" . 
}}-client": "true" diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/rbac.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/rbac.yaml new file mode 100644 index 0000000..ef92a1b --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/rbac.yaml @@ -0,0 +1,16 @@ +serviceAccount: + create: true + name: custom-sa + automountServiceAccountToken: true +rbac: + create: true + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list +psp: + create: true diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/replication.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/replication.yaml new file mode 100644 index 0000000..1ce6cf8 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/replication.yaml @@ -0,0 +1,5 @@ +# Test values file for generating all of the yaml and check that +# the rendering is correct +architecture: replication +readReplicas: + replicaCount: 3 diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/tls.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/tls.yaml new file mode 100644 index 0000000..24131f3 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/ci/tls.yaml @@ -0,0 +1,6 @@ +architecture: replication +tls: + enabled: true + autoGenerated: true +volumePermissions: + enabled: true diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/NOTES.txt b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/NOTES.txt new file mode 100644 index 0000000..710c733 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,89 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- /opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/postgresql/entrypoint.sh /opt/bitnami/scripts/postgresql/run.sh + +{{- else }} + +PostgreSQL can be accessed via port {{ include "postgresql.service.port" . }} on the following DNS names from within your cluster: + + {{ include "postgresql.primary.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection + +{{- if eq .Values.architecture "replication" }} + + {{ include "postgresql.readReplica.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read only connection + +{{- end }} + +{{- $customUser := include "postgresql.username" . 
}} +{{- if and (not (empty $customUser)) (ne $customUser "postgres") .Values.auth.enablePostgresUser }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.postgres-password}" | base64 --decode) + +To get the password for "{{ $customUser }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.password}" | base64 --decode) + +{{- else }} + +To get the password for "{{ default "postgres" $customUser }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.{{ ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres")) }}}" | base64 --decode) + +{{- end }} + +To connect to your database run the following command: + + kubectl run {{ include "common.names.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ include "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" \ + --command -- psql --host {{ include "postgresql.primary.fullname" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.service.port" . }} + + > NOTE: If you access the container using bash, make sure that you execute "/opt/bitnami/scripts/entrypoint.sh /bin/bash" in order to avoid the error "psql: local user with ID {{ .Values.primary.containerSecurityContext.runAsUser }}} does not exist" + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.primary.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "postgresql.primary.fullname" . }}) + PGPASSWORD="$POSTGRES_PASSWORD" psql --host $NODE_IP --port $NODE_PORT -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.primary.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "postgresql.primary.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "postgresql.primary.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + PGPASSWORD="$POSTGRES_PASSWORD" psql --host $SERVICE_IP --port {{ include "postgresql.service.port" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.primary.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "postgresql.primary.fullname" . }} {{ include "postgresql.service.port" . }}:{{ include "postgresql.service.port" . 
+
+To connect to your database run the following command:
+
+  kubectl run {{ include "common.names.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ include "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" \
+      --command -- psql --host {{ include "postgresql.primary.fullname" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.service.port" . }}
+
+  > NOTE: If you access the container using bash, make sure that you execute "/opt/bitnami/scripts/entrypoint.sh /bin/bash" in order to avoid the error "psql: local user with ID {{ .Values.primary.containerSecurityContext.runAsUser }} does not exist"
+
+To connect to your database from outside the cluster execute the following commands:
+
+{{- if contains "NodePort" .Values.primary.service.type }}
+
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "postgresql.primary.fullname" . }})
+  PGPASSWORD="$POSTGRES_PASSWORD" psql --host $NODE_IP --port $NODE_PORT -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }}
+
+{{- else if contains "LoadBalancer" .Values.primary.service.type }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "postgresql.primary.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "postgresql.primary.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+  PGPASSWORD="$POSTGRES_PASSWORD" psql --host $SERVICE_IP --port {{ include "postgresql.service.port" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }}
+
+{{- else if contains "ClusterIP" .Values.primary.service.type }}
+
+  kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "postgresql.primary.fullname" . }} {{ include "postgresql.service.port" . }}:{{ include "postgresql.service.port" . }} &
+  PGPASSWORD="$POSTGRES_PASSWORD" psql --host 127.0.0.1 -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.service.port" . }}
+
+{{- end }}
+{{- end }}
+
+{{- include "postgresql.validateValues" . -}}
+{{- include "common.warnings.rollingTag" .Values.image -}}
+{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
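The NOTES above pick a connection recipe by matching .Values.primary.service.type. A minimal values sketch, assuming only the field names these templates already reference, that would select the NodePort branch:

# Hedged values sketch: expose the primary via NodePort so the NodePort
# branch of the NOTES above renders. The fixed port number is an
# illustrative assumption; omit it to let Kubernetes pick one.
primary:
  service:
    type: NodePort
    nodePorts:
      postgresql: "30432"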
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/_helpers.tpl b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/_helpers.tpl
new file mode 100644
index 0000000..98eb56e
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/_helpers.tpl
@@ -0,0 +1,320 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Create a default fully qualified app name for PostgreSQL Primary objects
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "postgresql.primary.fullname" -}}
+{{- if eq .Values.architecture "replication" }}
+    {{- printf "%s-primary" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+    {{- include "common.names.fullname" . -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name for PostgreSQL read-only replicas objects
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "postgresql.readReplica.fullname" -}}
+{{- printf "%s-read" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the default FQDN for PostgreSQL primary headless service
+We truncate at 63 chars because of the DNS naming spec.
+*/}}
+{{- define "postgresql.primary.svc.headless" -}}
+{{- printf "%s-hl" (include "postgresql.primary.fullname" .) | trunc 63 | trimSuffix "-" }}
+{{- end -}}
+
+{{/*
+Create the default FQDN for PostgreSQL read-only replicas headless service
+We truncate at 63 chars because of the DNS naming spec.
+*/}}
+{{- define "postgresql.readReplica.svc.headless" -}}
+{{- printf "%s-hl" (include "postgresql.readReplica.fullname" .) | trunc 63 | trimSuffix "-" }}
+{{- end -}}
+
+{{/*
+Return the proper PostgreSQL image name
+*/}}
+{{- define "postgresql.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper PostgreSQL metrics image name
+*/}}
+{{- define "postgresql.metrics.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "postgresql.volumePermissions.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "postgresql.imagePullSecrets" -}}
+{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the name for a custom user to create
+*/}}
+{{- define "postgresql.username" -}}
+{{- if .Values.global.postgresql.auth.username }}
+    {{- .Values.global.postgresql.auth.username -}}
+{{- else -}}
+    {{- .Values.auth.username -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name for a custom database to create
+*/}}
+{{- define "postgresql.database" -}}
+{{- if .Values.global.postgresql.auth.database }}
+    {{- .Values.global.postgresql.auth.database -}}
+{{- else if .Values.auth.database -}}
+    {{- .Values.auth.database -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the password secret.
+*/}}
+{{- define "postgresql.secretName" -}}
+{{- if .Values.global.postgresql.auth.existingSecret }}
+    {{- printf "%s" (tpl .Values.global.postgresql.auth.existingSecret $) -}}
+{{- else if .Values.auth.existingSecret -}}
+    {{- printf "%s" (tpl .Values.auth.existingSecret $) -}}
+{{- else -}}
+    {{- printf "%s" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a secret object should be created
+*/}}
+{{- define "postgresql.createSecret" -}}
+{{- if not (or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret) -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return PostgreSQL service port
+*/}}
+{{- define "postgresql.service.port" -}}
+{{- if .Values.global.postgresql.service.ports.postgresql }}
+    {{- .Values.global.postgresql.service.ports.postgresql -}}
+{{- else -}}
+    {{- .Values.primary.service.ports.postgresql -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return PostgreSQL read replica service port
+*/}}
+{{- define "postgresql.readReplica.service.port" -}}
+{{- if .Values.global.postgresql.service.ports.postgresql }}
+    {{- .Values.global.postgresql.service.ports.postgresql -}}
+{{- else -}}
+    {{- .Values.readReplicas.service.ports.postgresql -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the PostgreSQL primary configuration ConfigMap name.
+*/}}
+{{- define "postgresql.primary.configmapName" -}}
+{{- if .Values.primary.existingConfigmap -}}
+    {{- printf "%s" (tpl .Values.primary.existingConfigmap $) -}}
+{{- else -}}
+    {{- printf "%s-configuration" (include "postgresql.primary.fullname" .)
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for PostgreSQL primary with the configuration +*/}} +{{- define "postgresql.primary.createConfigmap" -}} +{{- if and (or .Values.primary.configuration .Values.primary.pgHbaConfiguration) (not .Values.primary.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Get the PostgreSQL primary extended configuration ConfigMap name. +*/}} +{{- define "postgresql.primary.extendedConfigmapName" -}} +{{- if .Values.primary.existingExtendedConfigmap -}} + {{- printf "%s" (tpl .Values.primary.existingExtendedConfigmap $) -}} +{{- else -}} + {{- printf "%s-extended-configuration" (include "postgresql.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for PostgreSQL primary with the extended configuration +*/}} +{{- define "postgresql.primary.createExtendedConfigmap" -}} +{{- if and .Values.primary.extendedConfiguration (not .Values.primary.existingExtendedConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "postgresql.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdb.scriptsCM" -}} +{{- if .Values.primary.initdb.scriptsConfigMap -}} + {{- printf "%s" (tpl .Values.primary.initdb.scriptsConfigMap $) -}} +{{- else -}} + {{- printf "%s-init-scripts" (include "postgresql.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +{{- $customUser := include "postgresql.username" . }} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if .Values.tls.enabled }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} +{{- else }} + exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if .Values.tls.enabled }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) 
-}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap
+*/}}
+{{- define "postgresql.validateValues.ldapConfigurationMethod" -}}
+{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }}
+postgresql: ldap.url, ldap.server
+    You cannot set both `ldap.url` and `ldap.server` at the same time.
+    Please provide a unique way to configure LDAP.
+    More info at https://www.postgresql.org/docs/current/auth-ldap.html
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of Postgresql - If PSP is enabled RBAC should be enabled too
+*/}}
+{{- define "postgresql.validateValues.psp" -}}
+{{- if and .Values.psp.create (not .Values.rbac.create) }}
+postgresql: psp.create, rbac.create
+    RBAC should be enabled if PSP is enabled in order for PSP to work.
+    More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert file.
+*/}}
+{{- define "postgresql.tlsCert" -}}
+{{- if .Values.tls.autoGenerated }}
+    {{- printf "/opt/bitnami/postgresql/certs/tls.crt" -}}
+{{- else -}}
+    {{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert key file.
+*/}}
+{{- define "postgresql.tlsCertKey" -}}
+{{- if .Values.tls.autoGenerated }}
+    {{- printf "/opt/bitnami/postgresql/certs/tls.key" -}}
+{{- else -}}
+{{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CA cert file.
+*/}}
+{{- define "postgresql.tlsCACert" -}}
+{{- if .Values.tls.autoGenerated }}
+    {{- printf "/opt/bitnami/postgresql/certs/ca.crt" -}}
+{{- else -}}
+    {{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CRL file.
+*/}}
+{{- define "postgresql.tlsCRL" -}}
+{{- if .Values.tls.crlFilename -}}
+{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS credentials secret object should be created
+*/}}
+{{- define "postgresql.createTlsSecret" -}}
+{{- if and .Values.tls.autoGenerated (not .Values.tls.certificatesSecret) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the secret containing the TLS certificates.
+*/}}
+{{- define "postgresql.tlsSecretName" -}}
+{{- if .Values.tls.autoGenerated }}
+    {{- printf "%s-crt" (include "common.names.fullname" .) -}}
+{{- else -}}
+    {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }}
+{{- end -}}
+{{- end -}}
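The tls* helpers above resolve every certificate path in one of two ways: auto-generated certificates get fixed names under /opt/bitnami/postgresql/certs, while user-supplied certificates come from a named secret plus explicit filenames. A minimal values sketch for the bring-your-own-certificate case; the secret name is an illustrative assumption, the keys are the ones the helpers require:

# Hedged values sketch for postgresql.tlsCert/tlsCertKey/tlsCACert and
# postgresql.tlsSecretName above, with autoGenerated off. Filenames must
# match the keys inside the referenced secret.
tls:
  enabled: true
  autoGenerated: false
  certificatesSecret: my-postgresql-tls   # mounted, then copied into /opt/bitnami/postgresql/certs
  certFilename: tls.crt
  certKeyFilename: tls.key
  certCAFilename: ca.crt                  # optional CA; enables client cert verification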
"context" $) }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/networkpolicy-egress.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/networkpolicy-egress.yaml new file mode 100644 index 0000000..e862147 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/networkpolicy-egress.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.networkPolicy.enabled (or .Values.networkPolicy.egressRules.denyConnectionsToExternal .Values.networkPolicy.egressRules.customRules) }} +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ printf "%s-egress" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Egress + egress: + {{- if .Values.networkPolicy.egressRules.denyConnectionsToExternal }} + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - namespaceSelector: {} + {{- end }} + {{- if .Values.networkPolicy.egressRules.customRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/configmap.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/configmap.yaml new file mode 100644 index 0000000..d654a22 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/configmap.yaml @@ -0,0 +1,24 @@ +{{- if (include "postgresql.primary.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }}
+    app.kubernetes.io/component: primary
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  {{- if .Values.primary.configuration }}
+  postgresql.conf: |-
+    {{- include "common.tplvalues.render" ( dict "value" .Values.primary.configuration "context" $ ) | nindent 4 }}
+  {{- end }}
+  {{- if .Values.primary.pgHbaConfiguration }}
+  pg_hba.conf: |
+    {{- include "common.tplvalues.render" ( dict "value" .Values.primary.pgHbaConfiguration "context" $ ) | nindent 4 }}
+  {{- end }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/extended-configmap.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/extended-configmap.yaml
new file mode 100644
index 0000000..d129bd3
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/extended-configmap.yaml
@@ -0,0 +1,18 @@
+{{- if (include "postgresql.primary.createExtendedConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-extended-configuration" (include "postgresql.primary.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: primary
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  override.conf: |-
+    {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extendedConfiguration "context" $ ) | nindent 4 }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/initialization-configmap.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/initialization-configmap.yaml
new file mode 100644
index 0000000..d3d26cb
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/initialization-configmap.yaml
@@ -0,0 +1,15 @@
+{{- if and .Values.primary.initdb.scripts (not .Values.primary.initdb.scriptsConfigMap) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-init-scripts" (include "postgresql.primary.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data: {{- include "common.tplvalues.render" (dict "value" .Values.primary.initdb.scripts "context" .) | nindent 2 }}
+{{- end }}
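This init-scripts ConfigMap is only rendered when scripts are given inline (an existing scriptsConfigMap suppresses it), and the primary statefulset later in this patch mounts it at /docker-entrypoint-initdb.d/. A minimal values sketch; the script name and its SQL body are illustrative assumptions:

# Hedged values sketch feeding the init-scripts ConfigMap above.
primary:
  initdb:
    scripts:
      00-create-schema.sql: |
        CREATE SCHEMA IF NOT EXISTS keycloak;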
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/metrics-configmap.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/metrics-configmap.yaml
new file mode 100644
index 0000000..39c4805
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/metrics-configmap.yaml
@@ -0,0 +1,16 @@
+{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-metrics" (include "postgresql.primary.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  custom-metrics.yaml: {{- toYaml .Values.metrics.customMetrics | nindent 4 }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/metrics-svc.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/metrics-svc.yaml
new file mode 100644
index 0000000..75a1b81
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/metrics-svc.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.metrics.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ printf "%s-metrics" (include "postgresql.primary.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: metrics
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.metrics.service.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+spec:
+  type: ClusterIP
+  sessionAffinity: {{ .Values.metrics.service.sessionAffinity }}
+  {{- if .Values.metrics.service.clusterIP }}
+  clusterIP: {{ .Values.metrics.service.clusterIP }}
+  {{- end }}
+  ports:
+    - name: http-metrics
+      port: {{ .Values.metrics.service.ports.metrics }}
+      targetPort: http-metrics
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: primary
+{{- end }}
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/networkpolicy.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/networkpolicy.yaml
new file mode 100644
index 0000000..ce0052d
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/networkpolicy.yaml
@@ -0,0 +1,57 @@
+{{- if and .Values.networkPolicy.enabled (or .Values.networkPolicy.metrics.enabled .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled) }}
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+  name: {{ printf "%s-ingress" (include "postgresql.primary.fullname" .)
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: primary + ingress: + {{- if and .Values.metrics.enabled .Values.networkPolicy.metrics.enabled (or .Values.networkPolicy.metrics.namespaceSelector .Values.networkPolicy.metrics.podSelector) }} + - from: + {{- if .Values.networkPolicy.metrics.namespaceSelector }} + - namespaceSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.metrics.namespaceSelector "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.metrics.podSelector }} + - podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.metrics.podSelector "context" $) | nindent 14 }} + {{- end }} + ports: + - port: {{ .Values.metrics.containerPorts.metrics }} + {{- end }} + {{- if and .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled (or .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector) }} + - from: + {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector }} + - namespaceSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector }} + - podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector "context" $) | nindent 14 }} + {{- end }} + ports: + - port: {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- if and .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled (eq .Values.architecture "replication") }} + - from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} + app.kubernetes.io/component: read + ports: + - port: {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules "context" $) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/prometheusrule.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/prometheusrule.yaml new file mode 100644 index 0000000..cb2f1f2 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/prometheusrule.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "postgresql.primary.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "postgresql.primary.fullname" . }} + rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 8 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/servicemonitor.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/servicemonitor.yaml new file mode 100644 index 0000000..c4a19fe --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.primary.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/statefulset.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/statefulset.yaml new file mode 100644 index 0000000..ec6cdbb --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/statefulset.yaml @@ -0,0 +1,639 @@ +{{- $customUser := include "postgresql.username" . }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "postgresql.primary.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.labels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + serviceName: {{ include "postgresql.primary.svc.headless" . }} + {{- if .Values.primary.updateStrategy }} + updateStrategy: {{- toYaml .Values.primary.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: primary + template: + metadata: + name: {{ include "postgresql.primary.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.primary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "postgresql.primary.createConfigmap" .) 
}} + checksum/configuration: {{ include (print $.Template.BasePath "/primary/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "postgresql.primary.createExtendedConfigmap" .) }} + checksum/extended-configuration: {{ include (print $.Template.BasePath "/primary/extended-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.primary.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- if .Values.primary.extraPodSpec }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraPodSpec "context" $) | nindent 6 }} + {{- end }} + serviceAccountName: {{ include "postgresql.serviceAccountName" . }} + {{- include "postgresql.imagePullSecrets" . | nindent 6 }} + {{- if .Values.primary.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.primary.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.primary.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAffinityPreset "component" "primary" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAntiAffinityPreset "component" "primary" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.primary.nodeAffinityPreset.type "key" .Values.primary.nodeAffinityPreset.key "values" .Values.primary.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.primary.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.primary.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.primary.priorityClassName }} + priorityClassName: {{ .Values.primary.priorityClassName }} + {{- end }} + {{- if .Values.primary.schedulerName }} + schedulerName: {{ .Values.primary.schedulerName | quote }} + {{- end }} + {{- if .Values.primary.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.primary.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.primary.podSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + hostNetwork: {{ .Values.primary.hostNetwork }} + hostIPC: {{ .Values.primary.hostIPC }} + initContainers: + {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} + - name: copy-certs + image: {{ include "postgresql.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.primary.resources }} + resources: {{- toYaml .Values.primary.resources | nindent 12 }} + {{- end }} + # We don't require a privileged container in this case + {{- if .Values.primary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + chmod 600 {{ include "postgresql.tlsCertKey" . }} + volumeMounts: + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- else if and .Values.volumePermissions.enabled (or .Values.primary.persistence.enabled .Values.shmVolume.enabled) }} + - name: init-chmod-data + image: {{ include "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + {{- if .Values.primary.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.primary.persistence.mountPath }} + {{- else }} + chown {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} {{ .Values.primary.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }} + find {{ .Values.primary.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + xargs -r chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs -r chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ include "postgresql.tlsCertKey" . 
}} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.primary.persistence.enabled }} + - name: data + mountPath: {{ .Values.primary.persistence.mountPath }} + {{- if .Values.primary.persistence.subPath }} + subPath: {{ .Values.primary.persistence.subPath }} + {{- end }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.primary.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.initContainers "context" $ ) | nindent 8 }} + {{- end }} + containers: + - name: postgresql + image: {{ include "postgresql.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.primary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.primary.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.primary.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.primary.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.primary.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: {{ .Values.containerPorts.postgresql | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: {{ .Values.primary.persistence.mountPath | quote }} + {{- if .Values.primary.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + # Authentication + {{- if and (not (empty $customUser)) (ne $customUser "postgres") }} + - name: POSTGRES_USER + value: {{ $customUser | quote }} + {{- if .Values.auth.enablePostgresUser }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: postgres-password + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_PASSWORD_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }} + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres")) }} + {{- end }} + {{- if (include "postgresql.database" .) 
}} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) | quote }} + {{- end }} + # Replication + {{- if or (eq .Values.architecture "replication") .Values.primary.standby.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: {{ ternary "slave" "master" .Values.primary.standby.enabled | quote }} + - name: POSTGRES_REPLICATION_USER + value: {{ .Values.auth.replicationUsername | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off") }} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + # Initdb + {{- if .Values.primary.initdb.args }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.primary.initdb.args | quote }} + {{- end }} + {{- if .Values.primary.initdb.postgresqlWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.primary.initdb.postgresqlWalDir | quote }} + {{- end }} + {{- if .Values.primary.initdb.user }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.primary.initdb.user }} + {{- end }} + {{- if .Values.primary.initdb.password }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.primary.initdb.password | quote }} + {{- end }} + # Standby + {{- if .Values.primary.standby.enabled }} + - name: POSTGRES_MASTER_HOST + value: {{ .Values.primary.standby.primaryHost }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ .Values.primary.standby.primaryPort | quote }} + {{- end }} + # LDAP + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end }} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote }} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if not (empty .Values.ldap.bind_password) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: ldap-password + {{- end }} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end }} + # TLS + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ include "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ include "postgresql.tlsCertKey" . 
}} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ include "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ include "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + # Audit + - name: POSTGRESQL_LOG_HOSTNAME + value: {{ .Values.audit.logHostname | quote }} + - name: POSTGRESQL_LOG_CONNECTIONS + value: {{ .Values.audit.logConnections | quote }} + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: {{ .Values.audit.logDisconnections | quote }} + {{- if .Values.audit.logLinePrefix }} + - name: POSTGRESQL_LOG_LINE_PREFIX + value: {{ .Values.audit.logLinePrefix | quote }} + {{- end }} + {{- if .Values.audit.logTimezone }} + - name: POSTGRESQL_LOG_TIMEZONE + value: {{ .Values.audit.logTimezone | quote }} + {{- end }} + {{- if .Values.audit.pgAuditLog }} + - name: POSTGRESQL_PGAUDIT_LOG + value: {{ .Values.audit.pgAuditLog | quote }} + {{- end }} + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: {{ .Values.audit.pgAuditLogCatalog | quote }} + # Others + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: {{ .Values.audit.clientMinMessages | quote }} + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: {{ .Values.postgresqlSharedPreloadLibraries | quote }} + {{- if .Values.primary.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.primary.extraEnvVarsCM .Values.primary.extraEnvVarsSecret }} + envFrom: + {{- if .Values.primary.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.primary.extraEnvVarsCM }} + {{- end }} + {{- if .Values.primary.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.primary.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ .Values.containerPorts.postgresql }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.primary.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- else }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- else if .Values.primary.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.primary.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . 
}} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- else }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- else if .Values.primary.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.primary.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + {{- else if .Values.primary.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.primary.resources }} + resources: {{- toYaml .Values.primary.resources | nindent 12 }} + {{- end }} + {{- if .Values.primary.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.primary.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.primary.initdb.scriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.primary.persistence.enabled }} + - name: data + mountPath: {{ .Values.primary.persistence.mountPath }} + {{- if .Values.primary.persistence.subPath }} + subPath: {{ .Values.primary.persistence.subPath }} + {{- end }} + {{- end }} + {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.primary.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "postgresql.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.customMetrics }} + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.auth.database or .Values.global.postgresql.auth.database)" (include "postgresql.database" .) }} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.service.port" .)) (default "postgres" $customUser | quote) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.service.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: DATA_SOURCE_PASS_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }} + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres")) }} + {{- end }} + - name: DATA_SOURCE_USER + value: {{ default "postgres" $customUser | quote }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.metrics.containerPorts.metrics }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http-metrics + {{- else if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http-metrics + {{- else if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http-metrics + {{- else if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe 
"context" $) | nindent 12 }} + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.primary.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.sidecars "context" $ ) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }} + - name: postgresql-config + configMap: + name: {{ include "postgresql.primary.configmapName" . }} + {{- end }} + {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }} + - name: postgresql-extended-config + configMap: + name: {{ include "postgresql.primary.extendedConfigmapName" . }} + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + secret: + secretName: {{ include "postgresql.secretName" . }} + {{- end }} + {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }} + - name: custom-init-scripts + configMap: + name: {{ include "postgresql.initdb.scriptsCM" . }} + {{- end }} + {{- if .Values.primary.initdb.scriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ tpl .Values.primary.initdb.scriptsSecret $ }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ include "postgresql.tlsSecretName" . }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.primary.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + {{- if .Values.shmVolume.sizeLimit }} + sizeLimit: {{ .Values.shmVolume.sizeLimit }} + {{- end }} + {{- end }} + {{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ tpl .Values.primary.persistence.existingClaim $ }} + {{- else if not .Values.primary.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + {{- if .Values.primary.persistence.annotations }} + annotations: {{- toYaml .Values.primary.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.primary.persistence.accessModes }} + - {{ . 
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/svc-headless.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/svc-headless.yaml
new file mode 100644
index 0000000..b782631
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/svc-headless.yaml
@@ -0,0 +1,31 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "postgresql.primary.svc.headless" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    app.kubernetes.io/component: primary
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+  # Use this annotation in addition to the actual publishNotReadyAddresses
+  # field below because the annotation will stop being respected soon but the
+  # field is broken in some versions of Kubernetes:
+  # https://github.com/kubernetes/kubernetes/issues/58662
+  service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+  type: ClusterIP
+  clusterIP: None
+  # We want all pods in the StatefulSet to have their addresses published for
+  # the sake of the other Postgresql pods even before they're ready, since they
+  # have to be able to talk to each other in order to become ready.
+  publishNotReadyAddresses: true
+  ports:
+    - name: tcp-postgresql
+      port: {{ template "postgresql.service.port" . }}
+      targetPort: tcp-postgresql
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: primary
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/svc.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/svc.yaml
new file mode 100644
index 0000000..6d4a842
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/primary/svc.yaml
@@ -0,0 +1,43 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "postgresql.primary.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    app.kubernetes.io/component: primary
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.primary.service.annotations }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.annotations "context" $) | nindent 4 }}
+    {{- end }}
+spec:
+  type: {{ .Values.primary.service.type }}
+  {{- if and .Values.primary.service.loadBalancerIP (eq .Values.primary.service.type "LoadBalancer") }}
+  loadBalancerIP: {{ .Values.primary.service.loadBalancerIP }}
+  externalTrafficPolicy: {{ .Values.primary.service.externalTrafficPolicy | quote }}
+  {{- end }}
+  {{- if and (eq .Values.primary.service.type "LoadBalancer") .Values.primary.service.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges: {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.loadBalancerSourceRanges "context" $) | nindent 4 }}
+  {{- end }}
+  {{- if and (eq .Values.primary.service.type "ClusterIP") .Values.primary.service.clusterIP }}
+  clusterIP: {{ .Values.primary.service.clusterIP }}
+  {{- end }}
+  ports:
+    - name: tcp-postgresql
+      port: {{ template "postgresql.service.port" . }}
+      targetPort: tcp-postgresql
+      {{- if and (or (eq .Values.primary.service.type "NodePort") (eq .Values.primary.service.type "LoadBalancer")) (not (empty .Values.primary.service.nodePorts.postgresql)) }}
+      nodePort: {{ .Values.primary.service.nodePorts.postgresql }}
+      {{- else if eq .Values.primary.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+  {{- if .Values.primary.service.extraPorts }}
+  {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.extraPorts "context" $) | nindent 4 }}
+  {{- end }}
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: primary
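
If the primary needs to be reachable from outside the cluster, the service template above already supports it; a hedged values sketch (the address and CIDR are placeholders, not values from this patch):

    primary:
      service:
        type: LoadBalancer
        loadBalancerIP: 203.0.113.10       # hypothetical static IP
        loadBalancerSourceRanges:
          - 10.0.0.0/8
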
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/psp.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/psp.yaml
new file mode 100644
index 0000000..48d1175
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/psp.yaml
@@ -0,0 +1,41 @@
+{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- if and $pspAvailable .Values.psp.create }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  privileged: false
+  volumes:
+    - 'configMap'
+    - 'secret'
+    - 'persistentVolumeClaim'
+    - 'emptyDir'
+    - 'projected'
+  hostNetwork: false
+  hostIPC: false
+  hostPID: false
+  runAsUser:
+    rule: 'RunAsAny'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'MustRunAs'
+    ranges:
+      - min: 1
+        max: 65535
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+      - min: 1
+        max: 65535
+  readOnlyRootFilesystem: false
+{{- end }}
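
PodSecurityPolicy was removed in Kubernetes 1.25, which is what the semverCompare guard above checks. A sketch of the values that would render this object on an older cluster, using the flag names this chart already defines:

    psp:
      create: true    # only rendered when the detected kube version is < 1.25
    rbac:
      create: true    # lets the chart's Role grant 'use' on the policy (see role.yaml below)
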
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/networkpolicy.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/networkpolicy.yaml
new file mode 100644
index 0000000..c969cd7
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/networkpolicy.yaml
@@ -0,0 +1,36 @@
+{{- if and .Values.networkPolicy.enabled (eq .Values.architecture "replication") .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled }}
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+  name: {{ printf "%s-ingress" (include "postgresql.readReplica.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: read
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  podSelector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      app.kubernetes.io/component: read
+  ingress:
+    {{- if and .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled (or .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector) }}
+    - from:
+        {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector }}
+        - namespaceSelector:
+            matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector "context" $) | nindent 14 }}
+        {{- end }}
+        {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector }}
+        - podSelector:
+            matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector "context" $) | nindent 14 }}
+        {{- end }}
+      ports:
+        - port: {{ .Values.containerPorts.postgresql }}
+    {{- end }}
+    {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules "context" $) | nindent 4 }}
+    {{- end }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/statefulset.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/statefulset.yaml
new file mode 100644
index 0000000..486d803
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/statefulset.yaml
@@ -0,0 +1,433 @@
+{{- if eq .Values.architecture "replication" }}
+{{- $customUser := include "postgresql.username" . }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+  name: {{ include "postgresql.readReplica.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: read
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.readReplicas.labels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.labels "context" $ ) | nindent 4 }}
+    {{- end }}
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.readReplicas.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+spec:
+  replicas: {{ .Values.readReplicas.replicaCount }}
+  serviceName: {{ include "postgresql.readReplica.svc.headless" . }}
+  {{- if .Values.readReplicas.updateStrategy }}
+  updateStrategy: {{- toYaml .Values.readReplicas.updateStrategy | nindent 4 }}
+  {{- end }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      app.kubernetes.io/component: read
+  template:
+    metadata:
+      name: {{ include "postgresql.readReplica.fullname" . }}
+      labels: {{- include "common.labels.standard" . | nindent 8 }}
+        app.kubernetes.io/component: read
+        {{- if .Values.commonLabels }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }}
+        {{- end }}
+        {{- if .Values.readReplicas.podLabels }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.podLabels "context" $ ) | nindent 8 }}
+        {{- end }}
+      annotations:
+        {{- if .Values.readReplicas.podAnnotations }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.podAnnotations "context" $ ) | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- if .Values.readReplicas.extraPodSpec }}
+      {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraPodSpec "context" $) | nindent 6 }}
+      {{- end }}
+      serviceAccountName: {{ include "postgresql.serviceAccountName" . }}
+      {{- include "postgresql.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.readReplicas.hostAliases }}
+      hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.hostAliases "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.readReplicas.affinity }}
+      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.affinity "context" $) | nindent 8 }}
+      {{- else }}
+      affinity:
+        podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAffinityPreset "component" "read" "context" $) | nindent 10 }}
+        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAntiAffinityPreset "component" "read" "context" $) | nindent 10 }}
+        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.readReplicas.nodeAffinityPreset.type "key" .Values.readReplicas.nodeAffinityPreset.key "values" .Values.readReplicas.nodeAffinityPreset.values) | nindent 10 }}
+      {{- end }}
+      {{- if .Values.readReplicas.nodeSelector }}
+      nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.readReplicas.tolerations }}
+      tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.readReplicas.topologySpreadConstraints }}
+      topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.topologySpreadConstraints "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.readReplicas.priorityClassName }}
+      priorityClassName: {{ .Values.readReplicas.priorityClassName }}
+      {{- end }}
+      {{- if .Values.readReplicas.schedulerName }}
+      schedulerName: {{ .Values.readReplicas.schedulerName | quote }}
+      {{- end }}
+      {{- if .Values.readReplicas.terminationGracePeriodSeconds }}
+      terminationGracePeriodSeconds: {{ .Values.readReplicas.terminationGracePeriodSeconds }}
+      {{- end }}
+      {{- if .Values.readReplicas.podSecurityContext.enabled }}
+      securityContext: {{- omit .Values.readReplicas.podSecurityContext "enabled" | toYaml | nindent 8 }}
+      {{- end }}
+      hostNetwork: {{ .Values.readReplicas.hostNetwork }}
+      hostIPC: {{ .Values.readReplicas.hostIPC }}
+      initContainers:
+        {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }}
+        - name: copy-certs
+          image: {{ include "postgresql.volumePermissions.image" . }}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          {{- if .Values.readReplicas.resources }}
+          resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+          {{- end }}
+          # We don't require a privileged container in this case
+          {{- if .Values.readReplicas.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.readReplicas.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          command:
+            - /bin/sh
+            - -ec
+            - |
+              cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+              chmod 600 {{ include "postgresql.tlsCertKey" . }}
+          volumeMounts:
+            - name: raw-certificates
+              mountPath: /tmp/certs
+            - name: postgresql-certificates
+              mountPath: /opt/bitnami/postgresql/certs
+        {{- else if and .Values.volumePermissions.enabled (or .Values.readReplicas.persistence.enabled .Values.shmVolume.enabled) }}
+        - name: init-chmod-data
+          image: {{ include "postgresql.volumePermissions.image" . }}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          {{- if .Values.readReplicas.resources }}
+          resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+          {{- end }}
+          command:
+            - /bin/sh
+            - -ec
+            - |
+              {{- if .Values.readReplicas.persistence.enabled }}
+              {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+              chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.readReplicas.persistence.mountPath }}
+              {{- else }}
+              chown {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} {{ .Values.readReplicas.persistence.mountPath }}
+              {{- end }}
+              mkdir -p {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }}
+              chmod 700 {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }}
+              find {{ .Values.readReplicas.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \
+              {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+                xargs -r chown -R `id -u`:`id -G | cut -d " " -f2`
+              {{- else }}
+                xargs -r chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }}
+              {{- end }}
+              {{- end }}
+              {{- if .Values.shmVolume.enabled }}
+              chmod -R 777 /dev/shm
+              {{- end }}
+              {{- if .Values.tls.enabled }}
+              cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+              {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+              chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/
+              {{- else }}
+              chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/
+              {{- end }}
+              chmod 600 {{ include "postgresql.tlsCertKey" . }}
+              {{- end }}
+          {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+          securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }}
+          {{- else }}
+          securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{ if .Values.readReplicas.persistence.enabled }}
+            - name: data
+              mountPath: {{ .Values.readReplicas.persistence.mountPath }}
+              {{- if .Values.readReplicas.persistence.subPath }}
+              subPath: {{ .Values.readReplicas.persistence.subPath }}
+              {{- end }}
+            {{- end }}
+            {{- if .Values.shmVolume.enabled }}
+            - name: dshm
+              mountPath: /dev/shm
+            {{- end }}
+            {{- if .Values.tls.enabled }}
+            - name: raw-certificates
+              mountPath: /tmp/certs
+            - name: postgresql-certificates
+              mountPath: /opt/bitnami/postgresql/certs
+            {{- end }}
+        {{- end }}
+        {{- if .Values.readReplicas.initContainers }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.initContainers "context" $ ) | nindent 8 }}
+        {{- end }}
+      containers:
+        - name: postgresql
+          image: {{ include "postgresql.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.readReplicas.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.readReplicas.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+          {{- else if .Values.readReplicas.command }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.command "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- else if .Values.readReplicas.args }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.args "context" $) | nindent 12 }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+            - name: POSTGRESQL_PORT_NUMBER
+              value: {{ .Values.containerPorts.postgresql | quote }}
+            - name: POSTGRESQL_VOLUME_DIR
+              value: {{ .Values.readReplicas.persistence.mountPath | quote }}
+            {{- if .Values.readReplicas.persistence.mountPath }}
+            - name: PGDATA
+              value: {{ .Values.postgresqlDataDir | quote }}
+            {{- end }}
+            # Authentication
+            {{- if and (not (empty $customUser)) (ne $customUser "postgres") .Values.auth.enablePostgresUser }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: POSTGRES_POSTGRES_PASSWORD_FILE
+              value: "/opt/bitnami/postgresql/secrets/postgres-password"
+            {{- else }}
+            - name: POSTGRES_POSTGRES_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: postgres-password
+            {{- end }}
+            {{- end }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: POSTGRES_PASSWORD_FILE
+              value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }}
+            {{- else }}
+            - name: POSTGRES_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: {{ ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres")) }}
+            {{- end }}
+            # Replication
+            - name: POSTGRES_REPLICATION_MODE
+              value: "slave"
+            - name: POSTGRES_REPLICATION_USER
+              value: {{ .Values.auth.replicationUsername | quote }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: POSTGRES_REPLICATION_PASSWORD_FILE
+              value: "/opt/bitnami/postgresql/secrets/replication-password"
+            {{- else }}
+            - name: POSTGRES_REPLICATION_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: replication-password
+            {{- end }}
+            - name: POSTGRES_CLUSTER_APP_NAME
+              value: {{ .Values.replication.applicationName }}
+            - name: POSTGRES_MASTER_HOST
+              value: {{ include "postgresql.primary.fullname" . }}
+            - name: POSTGRES_MASTER_PORT_NUMBER
+              value: {{ include "postgresql.service.port" . | quote }}
+            # TLS
+            - name: POSTGRESQL_ENABLE_TLS
+              value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+            {{- if .Values.tls.enabled }}
+            - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS
+              value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }}
+            - name: POSTGRESQL_TLS_CERT_FILE
+              value: {{ include "postgresql.tlsCert" . }}
+            - name: POSTGRESQL_TLS_KEY_FILE
+              value: {{ include "postgresql.tlsCertKey" . }}
+            {{- if .Values.tls.certCAFilename }}
+            - name: POSTGRESQL_TLS_CA_FILE
+              value: {{ include "postgresql.tlsCACert" . }}
+            {{- end }}
+            {{- if .Values.tls.crlFilename }}
+            - name: POSTGRESQL_TLS_CRL_FILE
+              value: {{ include "postgresql.tlsCRL" . }}
+            {{- end }}
+            {{- end }}
+            # Audit
+            - name: POSTGRESQL_LOG_HOSTNAME
+              value: {{ .Values.audit.logHostname | quote }}
+            - name: POSTGRESQL_LOG_CONNECTIONS
+              value: {{ .Values.audit.logConnections | quote }}
+            - name: POSTGRESQL_LOG_DISCONNECTIONS
+              value: {{ .Values.audit.logDisconnections | quote }}
+            {{- if .Values.audit.logLinePrefix }}
+            - name: POSTGRESQL_LOG_LINE_PREFIX
+              value: {{ .Values.audit.logLinePrefix | quote }}
+            {{- end }}
+            {{- if .Values.audit.logTimezone }}
+            - name: POSTGRESQL_LOG_TIMEZONE
+              value: {{ .Values.audit.logTimezone | quote }}
+            {{- end }}
+            {{- if .Values.audit.pgAuditLog }}
+            - name: POSTGRESQL_PGAUDIT_LOG
+              value: {{ .Values.audit.pgAuditLog | quote }}
+            {{- end }}
+            - name: POSTGRESQL_PGAUDIT_LOG_CATALOG
+              value: {{ .Values.audit.pgAuditLogCatalog | quote }}
+            # Others
+            - name: POSTGRESQL_CLIENT_MIN_MESSAGES
+              value: {{ .Values.audit.clientMinMessages | quote }}
+            - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
+              value: {{ .Values.postgresqlSharedPreloadLibraries | quote }}
+            {{- if .Values.readReplicas.extraEnvVars }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraEnvVars "context" $) | nindent 12 }}
+            {{- end }}
+          {{- if or .Values.readReplicas.extraEnvVarsCM .Values.readReplicas.extraEnvVarsSecret }}
+          envFrom:
+            {{- if .Values.readReplicas.extraEnvVarsCM }}
+            - configMapRef:
+                name: {{ .Values.readReplicas.extraEnvVarsCM }}
+            {{- end }}
+            {{- if .Values.readReplicas.extraEnvVarsSecret }}
+            - secretRef:
+                name: {{ .Values.readReplicas.extraEnvVarsSecret }}
+            {{- end }}
+          {{- end }}
+          ports:
+            - name: tcp-postgresql
+              containerPort: {{ .Values.containerPorts.postgresql }}
+          {{- if not .Values.diagnosticMode.enabled }}
+          {{- if .Values.readReplicas.startupProbe.enabled }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.startupProbe "enabled") "context" $) | nindent 12 }}
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                {{- if (include "postgresql.database" .) }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- else }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- end }}
+          {{- else if .Values.readReplicas.customStartupProbe }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customStartupProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.readReplicas.livenessProbe.enabled }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.livenessProbe "enabled") "context" $) | nindent 12 }}
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                {{- if (include "postgresql.database" .) }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- else }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- end }}
+          {{- else if .Values.readReplicas.customLivenessProbe }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customLivenessProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.readReplicas.readinessProbe.enabled }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.readinessProbe "enabled") "context" $) | nindent 12 }}
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                - -e
+                {{- include "postgresql.readinessProbeCommand" . | nindent 16 }}
+          {{- else if .Values.readReplicas.customReadinessProbe }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customReadinessProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- end }}
+          {{- if .Values.readReplicas.resources }}
+          resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+          {{- end }}
+          {{- if .Values.readReplicas.lifecycleHooks }}
+          lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.lifecycleHooks "context" $) | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: postgresql-password
+              mountPath: /opt/bitnami/postgresql/secrets/
+            {{- end }}
+            {{- if .Values.tls.enabled }}
+            - name: postgresql-certificates
+              mountPath: /opt/bitnami/postgresql/certs
+              readOnly: true
+            {{- end }}
+            {{- if .Values.shmVolume.enabled }}
+            - name: dshm
+              mountPath: /dev/shm
+            {{- end }}
+            {{- if .Values.readReplicas.persistence.enabled }}
+            - name: data
+              mountPath: {{ .Values.readReplicas.persistence.mountPath }}
+              {{- if .Values.readReplicas.persistence.subPath }}
+              subPath: {{ .Values.readReplicas.persistence.subPath }}
+              {{- end }}
+            {{- end }}
+            {{- if .Values.readReplicas.extraVolumeMounts }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraVolumeMounts "context" $) | nindent 12 }}
+            {{- end }}
+        {{- if .Values.readReplicas.sidecars }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.sidecars "context" $ ) | nindent 8 }}
+        {{- end }}
+      volumes:
+        {{- if .Values.auth.usePasswordFiles }}
+        - name: postgresql-password
+          secret:
+            secretName: {{ include "postgresql.secretName" . }}
+        {{- end }}
+        {{- if .Values.tls.enabled }}
+        - name: raw-certificates
+          secret:
+            secretName: {{ include "postgresql.tlsSecretName" . }}
+        - name: postgresql-certificates
+          emptyDir: {}
+        {{- end }}
+        {{- if .Values.shmVolume.enabled }}
+        - name: dshm
+          emptyDir:
+            medium: Memory
+            {{- if .Values.shmVolume.sizeLimit }}
+            sizeLimit: {{ .Values.shmVolume.sizeLimit }}
+            {{- end }}
+        {{- end }}
+        {{- if .Values.readReplicas.extraVolumes }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.extraVolumes "context" $ ) | nindent 8 }}
+        {{- end }}
+        {{- if not .Values.readReplicas.persistence.enabled }}
+        - name: data
+          emptyDir: {}
+        {{- else }}
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+        {{- if .Values.readReplicas.persistence.annotations }}
+        annotations: {{- toYaml .Values.readReplicas.persistence.annotations | nindent 10 }}
+        {{- end }}
+      spec:
+        accessModes:
+        {{- range .Values.readReplicas.persistence.accessModes }}
+        - {{ . | quote }}
+        {{- end }}
+        {{- if .Values.readReplicas.persistence.dataSource }}
+        dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.dataSource "context" $) | nindent 10 }}
+        {{- end }}
+        resources:
+          requests:
+            storage: {{ .Values.readReplicas.persistence.size | quote }}
+        {{- if .Values.readReplicas.persistence.selector }}
+        selector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.selector "context" $) | nindent 10 }}
+        {{- end -}}
+        {{- include "common.storage.class" (dict "persistence" .Values.readReplicas.persistence "global" .Values.global) | nindent 8 }}
+  {{- end }}
+{{- end }}
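
A hedged sketch of the values that drive this read-replica StatefulSet; the flag names follow the @param documentation that appears later in this patch, and the counts are illustrative:

    architecture: replication
    readReplicas:
      replicaCount: 2
    replication:
      synchronousCommit: "on"
      numSynchronousReplicas: 1        # must not exceed readReplicas.replicaCount
      applicationName: my_application
    auth:
      replicationUsername: repl_user
      replicationPassword: ""          # auto-generated into the chart Secret when left empty

Each replica then starts with POSTGRES_REPLICATION_MODE=slave and streams from the primary service resolved by "postgresql.primary.fullname".
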
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/svc-headless.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/svc-headless.yaml
new file mode 100644
index 0000000..0371e49
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/svc-headless.yaml
@@ -0,0 +1,33 @@
+{{- if eq .Values.architecture "replication" }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "postgresql.readReplica.svc.headless" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    app.kubernetes.io/component: read
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+  # Use this annotation in addition to the actual publishNotReadyAddresses
+  # field below because the annotation will stop being respected soon but the
+  # field is broken in some versions of Kubernetes:
+  # https://github.com/kubernetes/kubernetes/issues/58662
+  service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+  type: ClusterIP
+  clusterIP: None
+  # We want all pods in the StatefulSet to have their addresses published for
+  # the sake of the other Postgresql pods even before they're ready, since they
+  # have to be able to talk to each other in order to become ready.
+  publishNotReadyAddresses: true
+  ports:
+    - name: tcp-postgresql
+      port: {{ include "postgresql.readReplica.service.port" . }}
+      targetPort: tcp-postgresql
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: read
+{{- end }}
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/svc.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/svc.yaml
new file mode 100644
index 0000000..2355475
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/read/svc.yaml
@@ -0,0 +1,45 @@
+{{- if eq .Values.architecture "replication" }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "postgresql.readReplica.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    app.kubernetes.io/component: read
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.readReplicas.service.annotations }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.annotations "context" $) | nindent 4 }}
+    {{- end }}
+spec:
+  type: {{ .Values.readReplicas.service.type }}
+  {{- if and .Values.readReplicas.service.loadBalancerIP (eq .Values.readReplicas.service.type "LoadBalancer") }}
+  loadBalancerIP: {{ .Values.readReplicas.service.loadBalancerIP }}
+  externalTrafficPolicy: {{ .Values.readReplicas.service.externalTrafficPolicy | quote }}
+  {{- end }}
+  {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") .Values.readReplicas.service.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.loadBalancerSourceRanges "context" $) | nindent 4 }}
+  {{- end }}
+  {{- if and (eq .Values.readReplicas.service.type "ClusterIP") .Values.readReplicas.service.clusterIP }}
+  clusterIP: {{ .Values.readReplicas.service.clusterIP }}
+  {{- end }}
+  ports:
+    - name: tcp-postgresql
+      port: {{ include "postgresql.readReplica.service.port" . }}
+      targetPort: tcp-postgresql
+      {{- if and (or (eq .Values.readReplicas.service.type "NodePort") (eq .Values.readReplicas.service.type "LoadBalancer")) (not (empty .Values.readReplicas.service.nodePorts.postgresql)) }}
+      nodePort: {{ .Values.readReplicas.service.nodePorts.postgresql }}
+      {{- else if eq .Values.readReplicas.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+  {{- if .Values.readReplicas.service.extraPorts }}
+  {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.extraPorts "context" $) | nindent 4 }}
+  {{- end }}
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: read
+{{- end }}
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/role.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/role.yaml
new file mode 100644
index 0000000..00f9222
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/role.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.rbac.create }}
+kind: Role
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+# yamllint disable rule:indentation
+rules:
+  {{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
+  {{- if and $pspAvailable .Values.psp.create }}
+  - apiGroups:
+      - 'policy'
+    resources:
+      - 'podsecuritypolicies'
+    verbs:
+      - 'use'
+    resourceNames:
+      - {{ include "common.names.fullname" . }}
+  {{- end }}
+  {{- if .Values.rbac.rules }}
+  {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }}
+  {{- end }}
+# yamllint enable rule:indentation
+{{- end }}
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/rolebinding.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/rolebinding.yaml
new file mode 100644
index 0000000..0311c0e
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/rolebinding.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.rbac.create }}
+kind: RoleBinding
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+roleRef:
+  kind: Role
+  name: {{ include "common.names.fullname" . }}
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+  - kind: ServiceAccount
+    name: {{ include "postgresql.serviceAccountName" . }}
+    namespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/secrets.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/secrets.yaml
new file mode 100644
index 0000000..e8cc69f
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/secrets.yaml
@@ -0,0 +1,29 @@
+{{- if (include "postgresql.createSecret" .) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  {{- if .Values.auth.enablePostgresUser }}
+  postgres-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "postgres-password" "providedValues" (list "global.postgresql.auth.postgresPassword" "auth.postgresPassword") "context" $) }}
+  {{- end }}
+  {{- if not (empty (include "postgresql.username" .)) }}
+  password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "password" "providedValues" (list "global.postgresql.auth.password" "auth.password") "context" $) }}
+  {{- end }}
+  {{- if eq .Values.architecture "replication" }}
+  replication-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "replication-password" "providedValues" (list "auth.replicationPassword") "context" $) }}
+  {{- end }}
+  # Unlike the other passwords, the LDAP password is not auto-generated when it's not provided
+  {{- if and .Values.ldap.enabled .Values.ldap.bind_password }}
+  ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }}
+  {{- end }}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/serviceaccount.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/serviceaccount.yaml
new file mode 100644
index 0000000..179f8f2
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/serviceaccount.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "postgresql.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.serviceAccount.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/tls-secrets.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/tls-secrets.yaml
new file mode 100644
index 0000000..59c5776
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/templates/tls-secrets.yaml
@@ -0,0 +1,27 @@
+{{- if (include "postgresql.createTlsSecret" . ) }}
+{{- $ca := genCA "postgresql-ca" 365 }}
+{{- $fullname := include "common.names.fullname" . }}
+{{- $releaseNamespace := .Release.Namespace }}
+{{- $clusterDomain := .Values.clusterDomain }}
+{{- $primaryHeadlessServiceName := include "postgresql.primary.svc.headless" . }}
+{{- $readHeadlessServiceName := include "postgresql.readReplica.svc.headless" . }}
+{{- $altNames := list (printf "*.%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) $fullname }}
+{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-crt" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: kubernetes.io/tls
+data:
+  ca.crt: {{ $ca.Cert | b64enc | quote }}
+  tls.crt: {{ $crt.Cert | b64enc | quote }}
+  tls.key: {{ $crt.Key | b64enc | quote }}
+{{- end }}
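
The genCA/genSignedCert path above only runs when the chart is asked to create the TLS secret itself (the "postgresql.createTlsSecret" helper, presumably gated on the tls values); a hedged sketch:

    tls:
      enabled: true
      autoGenerated: true   # mint a 365-day self-signed CA and server cert at render time
      # alternatively, point certificatesSecret/certFilename/certKeyFilename at an existing secret

Note that self-signed material is regenerated on upgrades that re-render the template, so pinning an existing secret is the more stable choice for long-lived deployments.
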
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/values.schema.json b/ansible/roles/helm_install/files/keycloak/charts/postgresql/values.schema.json
new file mode 100644
index 0000000..fc41483
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/values.schema.json
@@ -0,0 +1,156 @@
+{
+    "$schema": "http://json-schema.org/schema#",
+    "type": "object",
+    "properties": {
+        "architecture": {
+            "type": "string",
+            "title": "PostgreSQL architecture",
+            "form": true,
+            "description": "Allowed values: `standalone` or `replication`"
+        },
+        "auth": {
+            "type": "object",
+            "title": "Authentication configuration",
+            "form": true,
+            "properties": {
+                "enablePostgresUser": {
+                    "type": "boolean",
+                    "title": "Enable \"postgres\" admin user",
+                    "description": "Assign a password to the \"postgres\" admin user. Otherwise, remote access will be blocked for this user",
+                    "form": true
+                },
+                "postgresPassword": {
+                    "type": "string",
+                    "title": "Password for the \"postgres\" admin user",
+                    "description": "Defaults to a random 10-character alphanumeric string if not set",
+                    "form": true
+                },
+                "database": {
+                    "type": "string",
+                    "title": "PostgreSQL custom database",
+                    "description": "Name of the custom database to be created during the 1st initialization of PostgreSQL",
+                    "form": true
+                },
+                "username": {
+                    "type": "string",
+                    "title": "PostgreSQL custom user",
+                    "description": "Name of the custom user to be created during the 1st initialization of PostgreSQL. This user only has permissions on the PostgreSQL custom database",
+                    "form": true
+                },
+                "password": {
+                    "type": "string",
+                    "title": "Password for the custom user to create",
+                    "description": "Defaults to a random 10-character alphanumeric string if not set",
+                    "form": true
+                },
+                "replicationUsername": {
+                    "type": "string",
+                    "title": "PostgreSQL replication user",
+                    "description": "Name of user used to manage replication.",
+                    "form": true,
+                    "hidden": {
+                        "value": "standalone",
+                        "path": "architecture"
+                    }
+                },
+                "replicationPassword": {
+                    "type": "string",
+                    "title": "Password for PostgreSQL replication user",
+                    "description": "Defaults to a random 10-character alphanumeric string if not set",
+                    "form": true,
+                    "hidden": {
+                        "value": "standalone",
+                        "path": "architecture"
+                    }
+                }
+            }
+        },
+        "persistence": {
+            "type": "object",
+            "properties": {
+                "size": {
+                    "type": "string",
+                    "title": "Persistent Volume Size",
+                    "form": true,
+                    "render": "slider",
+                    "sliderMin": 1,
+                    "sliderMax": 100,
+                    "sliderUnit": "Gi"
+                }
+            }
+        },
+        "resources": {
+            "type": "object",
+            "title": "Required Resources",
+            "description": "Configure resource requests",
+            "form": true,
+            "properties": {
+                "requests": {
+                    "type": "object",
+                    "properties": {
+                        "memory": {
+                            "type": "string",
+                            "form": true,
+                            "render": "slider",
+                            "title": "Memory Request",
+                            "sliderMin": 10,
+                            "sliderMax": 2048,
+                            "sliderUnit": "Mi"
+                        },
+                        "cpu": {
+                            "type": "string",
+                            "form": true,
+                            "render": "slider",
+                            "title": "CPU Request",
+                            "sliderMin": 10,
+                            "sliderMax": 2000,
+                            "sliderUnit": "m"
+                        }
+                    }
+                }
+            }
+        },
+        "replication": {
+            "type": "object",
+            "form": true,
+            "title": "Replication Details",
+            "properties": {
+                "enabled": {
+                    "type": "boolean",
+                    "title": "Enable Replication",
+                    "form": true
+                },
+                "readReplicas": {
+                    "type": "integer",
+                    "title": "Read Replicas",
+                    "form": true,
+                    "hidden": {
+                        "value": "standalone",
+                        "path": "architecture"
+                    }
+                }
+            }
+        },
+        "volumePermissions": {
+            "type": "object",
+            "properties": {
+                "enabled": {
+                    "type": "boolean",
+                    "form": true,
+                    "title": "Enable Init Containers",
+                    "description": "Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup"
+                }
+            }
+        },
+        "metrics": {
+            "type": "object",
+            "properties": {
+                "enabled": {
+                    "type": "boolean",
+                    "title": "Configure metrics exporter",
+                    "form": true
+                }
+            }
+        }
+    }
+}
diff --git a/ansible/roles/helm_install/files/keycloak/charts/postgresql/values.yaml b/ansible/roles/helm_install/files/keycloak/charts/postgresql/values.yaml
new file mode 100644
index 0000000..b766f88
--- /dev/null
+++ b/ansible/roles/helm_install/files/keycloak/charts/postgresql/values.yaml
@@ -0,0 +1,1329 @@
+## @section Global parameters
+## Please note that this will override the parameters, including dependencies, configured to use the global value
+##
+global:
+  ## @param global.imageRegistry Global Docker image registry
+  ##
+  imageRegistry: ""
+  ## @param global.imagePullSecrets Global Docker registry secret names as an array
+  ## e.g.
+  ## imagePullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  imagePullSecrets: []
+  ## @param global.storageClass Global StorageClass for Persistent Volume(s)
+  ##
+  storageClass: ""
+  postgresql:
+    ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`)
+    ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`)
+    ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`)
+    ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`)
+    ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`)
+    ##
+    auth:
+      postgresPassword: ""
+      username: ""
+      password: ""
+      database: ""
+      existingSecret: ""
+    ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`)
+    ##
+    service:
+      ports:
+        postgresql: ""
+
+## @section Common parameters
+##
+
+## @param kubeVersion Override Kubernetes version
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname template
+##
+fullnameOverride: ""
+## @param clusterDomain Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template)
+##
+extraDeploy: []
+## @param commonLabels Add labels to all the deployed resources
+##
+commonLabels: {}
+## @param commonAnnotations Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+## Enable diagnostic mode in the statefulset
+##
+diagnosticMode:
+  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+  ##
+  enabled: false
+  ## @param diagnosticMode.command Command to override all containers in the statefulset
+  ##
+  command:
+    - sleep
+  ## @param diagnosticMode.args Args to override all containers in the statefulset
+  ##
+  args:
+    - infinity
+
+## @section PostgreSQL common parameters
+##
+
+## Bitnami PostgreSQL image version
+## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
+## @param image.registry PostgreSQL image registry
+## @param image.repository PostgreSQL image repository
+## @param image.tag PostgreSQL image tag (immutable tags are recommended)
+## @param image.pullPolicy PostgreSQL image pull policy
+## @param image.pullSecrets Specify image pull secrets
+## @param image.debug Specify if debug values should be set
+##
+image:
+  registry: docker.io
+  repository: bitnami/postgresql
+  tag: 14.2.0-debian-10-r70
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## Example:
+  ## pullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  pullSecrets: []
+  ## Set to true if you would like to see extra information on logs
+  ##
+  debug: false
+## Authentication parameters
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run
+##
+auth:
+  ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user
+  ##
+  enablePostgresUser: true
+  ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` with key `postgres-password` is provided
+  ##
+  postgresPassword: ""
+  ## @param auth.username Name for a custom user to create
+  ##
+  username: ""
+  ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` with key `password` is provided
+  ##
+  password: ""
+  ## @param auth.database Name for a custom database to create
+  ##
+  database: ""
+  ## @param auth.replicationUsername Name of the replication user
+  ##
+  replicationUsername: repl_user
+  ## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` with key `replication-password` is provided
+  ##
+  replicationPassword: ""
+  ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. The secret must contain the keys `postgres-password` (which is the password for "postgres" admin user), `password` (which is the password for the custom user to create when `auth.username` is set) and `replication-password` (which is the password for replication user). `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contain the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case.
+  ## The value is evaluated as a template.
+  ##
+  existingSecret: ""
+  ## @param auth.usePasswordFiles Mount credentials as files instead of using environment variables
+  ##
+  usePasswordFiles: false
+## @param architecture PostgreSQL architecture (`standalone` or `replication`)
+##
+architecture: standalone
+## Replication configuration
+## Ignored if `architecture` is `standalone`
+##
+replication:
+  ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off`
+  ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`.
+  ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT
+  ##
+  synchronousCommit: "off"
+  numSynchronousReplicas: 0
+  ## @param replication.applicationName Cluster application name. Useful for advanced replication settings
+  ##
+  applicationName: my_application
+## @param containerPorts.postgresql PostgreSQL container port
+##
+containerPorts:
+  postgresql: 5432
+## Audit settings
+## https://github.com/bitnami/bitnami-docker-postgresql#auditing
+## @param audit.logHostname Log client hostnames
+## @param audit.logConnections Add client log-in operations to the log file
+## @param audit.logDisconnections Add client log-outs operations to the log file
+## @param audit.pgAuditLog Add operations to log using the pgAudit extension
+## @param audit.pgAuditLogCatalog Log catalog using pgAudit
+## @param audit.clientMinMessages Message log level to share with the user
+## @param audit.logLinePrefix Template for log line prefix (default if not set)
+## @param audit.logTimezone Timezone for the log timestamps
+##
+audit:
+  logHostname: false
+  logConnections: false
+  logDisconnections: false
+  pgAuditLog: ""
+  pgAuditLogCatalog: "off"
+  clientMinMessages: error
+  logLinePrefix: ""
+  logTimezone: ""
+## LDAP configuration
+## @param ldap.enabled Enable LDAP support
+## @param ldap.url LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`
+## @param ldap.server IP address or name of the LDAP server.
+## @param ldap.port Port number on the LDAP server to connect to
+## @param ldap.prefix String to prepend to the user name when forming the DN to bind
+## @param ldap.suffix String to append to the user name when forming the DN to bind
+## @param ldap.baseDN Root DN to begin the search for the user in
+## @param ldap.bindDN DN of user to bind to LDAP
+## @param ldap.bind_password Password for the user to bind to LDAP
+## @param ldap.search_attr Attribute to match against the user name in the search
+## @param ldap.search_filter The search filter to use when doing search+bind authentication
+## @param ldap.scheme Set to `ldaps` to use LDAPS
+## @param ldap.tls Set to `1` to use TLS encryption
+##
+ldap:
+  enabled: false
+  url: ""
+  server: ""
+  port: ""
+  prefix: ""
+  suffix: ""
+  baseDN: ""
+  bindDN: ""
+  bind_password: ""
+  search_attr: ""
+  search_filter: ""
+  scheme: ""
+  tls: ""
+## @param postgresqlDataDir PostgreSQL data dir folder
+##
+postgresqlDataDir: /bitnami/postgresql/data
+## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list)
+##
+postgresqlSharedPreloadLibraries: "pgaudit"
+## Start PostgreSQL pod(s) without limitations on shm memory.
+## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M`
+## ref: https://github.com/docker-library/postgres/issues/416
+## ref: https://github.com/containerd/containerd/issues/3654
+##
+shmVolume:
+  ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s)
+  ##
+  enabled: true
+  ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs
+  ## Note: the size of the tmpfs counts against container's memory limit
+  ## e.g:
+  ## sizeLimit: 1Gi
+  ##
+  sizeLimit: ""
+## TLS configuration
+##
+tls:
+  ## @param tls.enabled Enable TLS traffic support
+  ##
+  enabled: false
+  ## @param tls.autoGenerated Automatically generate self-signed TLS certificates
+  ##
+  autoGenerated: false
+  ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's
+  ##
+  preferServerCiphers: true
+  ## @param tls.certificatesSecret Name of an existing secret that contains the certificates
+  ##
+  certificatesSecret: ""
+  ## @param tls.certFilename Certificate filename
+  ##
+  certFilename: ""
+  ## @param tls.certKeyFilename Certificate key filename
+  ##
+  certKeyFilename: ""
+  ## @param tls.certCAFilename CA Certificate filename
+  ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate
+  ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html
+  ##
+  certCAFilename: ""
+  ## @param tls.crlFilename File containing a Certificate Revocation List
+  ##
+  crlFilename: ""
+
+## @section PostgreSQL Primary parameters
+##
+primary:
+  ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap
+  ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
+  ##
+  configuration: ""
+  ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration
+  ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html
+  ## e.g:
+  ## pgHbaConfiguration: |-
+  ##   local all all trust
+  ##   host all all localhost trust
+  ##   host mydatabase mysuser 192.168.0.0/24 md5
+  ##
+  pgHbaConfiguration: ""
+  ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration
+  ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored
+  ##
+  existingConfigmap: ""
+  ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration)
+  ## ref: https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
+  ##
+  extendedConfiguration: ""
+  ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration
+  ## NOTE: `primary.extendedConfiguration` will be ignored
+  ##
+  existingExtendedConfigmap: ""
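
Since `extendedConfiguration` is appended to the default configuration rather than replacing it, it is the lighter-touch tuning knob; a small illustrative override (the settings themselves are standard postgresql.conf parameters, not chart defaults):

    primary:
      extendedConfiguration: |
        max_connections = 200
        shared_buffers = 256MB
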
+
+## @section PostgreSQL Primary parameters
+##
+primary:
+  ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap
+  ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
+  ##
+  configuration: ""
+  ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration
+  ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html
+  ## e.g:
+  ## pgHbaConfiguration: |-
+  ##   local all all trust
+  ##   host all all localhost trust
+  ##   host mydatabase mysuser 192.168.0.0/24 md5
+  ##
+  pgHbaConfiguration: ""
+  ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration
+  ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored
+  ##
+  existingConfigmap: ""
+  ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration)
+  ## ref: https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
+  ##
+  extendedConfiguration: ""
+  ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration
+  ## NOTE: `primary.extendedConfiguration` will be ignored
+  ##
+  existingExtendedConfigmap: ""
+  ## Initdb configuration
+  ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#specifying-initdb-arguments
+  ##
+  initdb:
+    ## @param primary.initdb.args PostgreSQL initdb extra arguments
+    ##
+    args: ""
+    ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log
+    ##
+    postgresqlWalDir: ""
+    ## @param primary.initdb.scripts Dictionary of initdb scripts
+    ## Specify dictionary of scripts to be run at first boot
+    ## e.g:
+    ## scripts:
+    ##   my_init_script.sh: |
+    ##     #!/bin/sh
+    ##     echo "Do something."
+    ##
+    scripts: {}
+    ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot
+    ## NOTE: This will override `primary.initdb.scripts`
+    ##
+    scriptsConfigMap: ""
+    ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information)
+    ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap`
+    ##
+    scriptsSecret: ""
+    ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts
+    ##
+    user: ""
+    ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts
+    ##
+    password: ""
+  ## Configure the current cluster's primary server to be the standby server in another cluster.
+  ## This will allow cross-cluster replication and provide cross-cluster high availability.
+  ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled.
+  ## @param primary.standby.enabled Whether to enable the current cluster's primary as the standby server of another cluster or not
+  ## @param primary.standby.primaryHost The host of the replication primary in the other cluster
+  ## @param primary.standby.primaryPort The port of the replication primary in the other cluster
+  ##
+  standby:
+    enabled: false
+    primaryHost: ""
+    primaryPort: ""
+  ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes
+  ## e.g:
+  ## extraEnvVars:
+  ##   - name: FOO
+  ##     value: "bar"
+  ##
+  extraEnvVars: []
+  ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes
+  ##
+  extraEnvVarsCM: ""
+  ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes
+  ##
+  extraEnvVarsSecret: ""
+  ## @param primary.command Override default container command (useful when using custom images)
+  ##
+  command: []
+  ## @param primary.args Override default container args (useful when using custom images)
+  ##
+  args: []
+  ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+  ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers
+  ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+  ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe
+  ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+  ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe
+  ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 30
+    periodSeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 6
+    successThreshold: 1
+  ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers
+  ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+  ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe
+  ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+  ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe
+  ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe
+  ##
+  readinessProbe:
+    enabled: true
+
initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL Primary resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param primary.resources.limits The resources limits for the PostgreSQL Primary containers + ## @param primary.resources.requests.memory The requested memory for the PostgreSQL Primary containers + ## @param primary.resources.requests.cpu The requested cpu for the PostgreSQL Primary containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.podSecurityContext.enabled Enable security context + ## @param primary.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.containerSecurityContext.enabled Enable container security context + ## @param primary.containerSecurityContext.runAsUser User ID for the container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param primary.hostAliases PostgreSQL primary pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary) + ## + hostNetwork: false + ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) + ## + hostIPC: false + ## @param primary.labels Map of labels to add to the statefulset (postgresql primary) + ## + labels: {} + ## @param primary.annotations Annotations for PostgreSQL primary pods + ## + annotations: {} + ## @param primary.podLabels Map of labels to add to the pods (postgresql primary) + ## + podLabels: {} + ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary) + ## + podAnnotations: {} + ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. 
Allowed values: `soft` or `hard`
+  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+  ##
+  podAffinityPreset: ""
+  ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
+  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+  ##
+  podAntiAffinityPreset: soft
+  ## PostgreSQL Primary node affinity preset
+  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+  ##
+  nodeAffinityPreset:
+    ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
+    ##
+    type: ""
+    ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match. Ignored if `primary.affinity` is set.
+    ## E.g.
+    ## key: "kubernetes.io/e2e-az-name"
+    ##
+    key: ""
+    ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set.
+    ## E.g.
+    ## values:
+    ##   - e2e-az1
+    ##   - e2e-az2
+    ##
+    values: []
+  ## @param primary.affinity Affinity for PostgreSQL primary pods assignment
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set
+  ##
+  affinity: {}
+  ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment
+  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment
+  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+  ##
+  topologySpreadConstraints: {}
+  ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary)
+  ##
+  priorityClassName: ""
+  ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork".
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type + ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) + ## + extraPodSpec: {} + ## PostgreSQL Primary service configuration + ## + service: + ## @param primary.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param primary.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param primary.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.annotations Annotations for PostgreSQL primary service + ## + annotations: {} + ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service + ## + extraPorts: [] + ## PostgreSQL Primary persistence configuration + ## + persistence: + ## @param primary.persistence.enabled Enable 
PostgreSQL Primary data persistence using PVC + ## + enabled: true + ## @param primary.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "" + ## @param primary.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param primary.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param primary.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.persistence.dataSource Custom PVC data source + ## + dataSource: {} + +## @section PostgreSQL read only replica parameters +## +readReplicas: + ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas + ## + replicaCount: 1 + ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsCM: "" + ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsSecret: "" + ## @param readReplicas.command Override default container command (useful when using custom images) + ## + command: [] + ## @param readReplicas.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers + ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers + ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds 
for readinessProbe
+  ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe
+  ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+  ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe
+  ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe
+  ##
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 5
+    periodSeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 6
+    successThreshold: 1
+  ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers
+  ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+  ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe
+  ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+  ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe
+  ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe
+  ##
+  startupProbe:
+    enabled: false
+    initialDelaySeconds: 30
+    periodSeconds: 10
+    timeoutSeconds: 1
+    failureThreshold: 15
+    successThreshold: 1
+  ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one
+  ##
+  customLivenessProbe: {}
+  ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one
+  ##
+  customReadinessProbe: {}
+  ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one
+  ##
+  customStartupProbe: {}
+  ## @param readReplicas.lifecycleHooks Lifecycle hooks for the PostgreSQL read only container to automate configuration before or after startup
+  ##
+  lifecycleHooks: {}
+  ## PostgreSQL read only resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param readReplicas.resources.limits The resources limits for the PostgreSQL read only containers
+  ## @param readReplicas.resources.requests.memory The requested memory for the PostgreSQL read only containers
+  ## @param readReplicas.resources.requests.cpu The requested CPU for the PostgreSQL read only containers
+  ##
+  resources:
+    limits: {}
+    requests:
+      memory: 256Mi
+      cpu: 250m
+  ## Pod Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ## @param readReplicas.podSecurityContext.enabled Enable security context
+  ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod
+  ##
+  podSecurityContext:
+    enabled: true
+    fsGroup: 1001
+  ## Container Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ## @param readReplicas.containerSecurityContext.enabled Enable container security context
+  ## @param readReplicas.containerSecurityContext.runAsUser User ID for the container
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 1001
+  ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases
+  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+  ##
+  hostAliases: []
+  ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only)
+  ##
+  hostNetwork: false
+  ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only)
+  ##
+  hostIPC: false
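+  ## NOTE: a minimal override-file sketch, not part of the chart defaults, of
+  ## running two read replicas with larger requests; the values are illustrative only:
+  ## e.g:
+  ## readReplicas:
+  ##   replicaCount: 2
+  ##   resources:
+  ##     requests:
+  ##       memory: 512Mi
+  ##       cpu: 500m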
+  ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only)
+  ##
+  labels: {}
+  ## @param readReplicas.annotations Annotations for PostgreSQL read only pods
+  ##
+  annotations: {}
+  ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only)
+  ##
+  podLabels: {}
+  ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only)
+  ##
+  podAnnotations: {}
+  ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
+  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+  ##
+  podAffinityPreset: ""
+  ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
+  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+  ##
+  podAntiAffinityPreset: soft
+  ## PostgreSQL read only node affinity preset
+  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+  ##
+  nodeAffinityPreset:
+    ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
+    ##
+    type: ""
+    ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match. Ignored if `readReplicas.affinity` is set.
+    ## E.g.
+    ## key: "kubernetes.io/e2e-az-name"
+    ##
+    key: ""
+    ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `readReplicas.affinity` is set.
+    ## E.g.
+    ## values:
+    ##   - e2e-az1
+    ##   - e2e-az2
+    ##
+    values: []
+  ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ## Note: readReplicas.podAffinityPreset, readReplicas.podAntiAffinityPreset, and readReplicas.nodeAffinityPreset will be ignored when it's set
+  ##
+  affinity: {}
+  ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment
+  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment
+  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+  ##
+  topologySpreadConstraints: {}
+  ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only)
+  ##
+  priorityClassName: ""
+  ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork".
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type + ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) + ## + extraVolumeMounts: [] + ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) + ## + extraVolumes: [] + ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s) + ## + extraPodSpec: {} + ## PostgreSQL read only service configuration + ## + service: + ## @param readReplicas.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param readReplicas.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service + ## + annotations: {} + ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service + ## + extraPorts: [] + ## 
PostgreSQL read only persistence configuration
+  ##
+  persistence:
+    ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC
+    ##
+    enabled: true
+    ## @param readReplicas.persistence.mountPath The path the volume will be mounted at
+    ## Note: useful when using custom PostgreSQL images
+    ##
+    mountPath: /bitnami/postgresql
+    ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to
+    ## Useful in dev environments and one PV for multiple services
+    ##
+    subPath: ""
+    ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ## set, choosing the default provisioner. (gp2 on AWS, standard on
+    ## GKE, AWS & OpenStack)
+    ##
+    storageClass: ""
+    ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume
+    ##
+    size: 8Gi
+    ## @param readReplicas.persistence.annotations Annotations for the PVC
+    ##
+    annotations: {}
+    ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template)
+    ## selector:
+    ##   matchLabels:
+    ##     app: my-app
+    ##
+    selector: {}
+    ## @param readReplicas.persistence.dataSource Custom PVC data source
+    ##
+    dataSource: {}
+
+## @section NetworkPolicy parameters
+
+## Add networkpolicies
+##
+networkPolicy:
+  ## @param networkPolicy.enabled Enable network policies
+  ##
+  enabled: false
+  ## @param networkPolicy.metrics.enabled Enable network policies for metrics (prometheus)
+  ## @param networkPolicy.metrics.namespaceSelector [object] Monitoring namespace selector labels. These labels will be used to identify the Prometheus namespace.
+  ## @param networkPolicy.metrics.podSelector [object] Monitoring pod selector labels. These labels will be used to identify the Prometheus pods.
+  ##
+  metrics:
+    enabled: false
+    ## e.g:
+    ## namespaceSelector:
+    ##   label: monitoring
+    ##
+    namespaceSelector: {}
+    ## e.g:
+    ## podSelector:
+    ##   label: monitoring
+    ##
+    podSelector: {}
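+  ## NOTE: a minimal sketch, not part of the chart defaults, of an override that
+  ## enables the policy and only allows metrics scraping from a namespace labelled
+  ## `role: monitoring`; the label is a placeholder, not one used in this repository:
+  ## e.g:
+  ## networkPolicy:
+  ##   enabled: true
+  ##   metrics:
+  ##     enabled: true
+  ##     namespaceSelector:
+  ##       role: monitoring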
+  ## Ingress Rules
+  ##
+  ingressRules:
+    ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled Enable ingress rule that makes the PostgreSQL primary node only accessible from a particular origin.
+    ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed namespace(s).
+    ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed pod(s).
+    ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL primary node.
+    ##
+    primaryAccessOnlyFrom:
+      enabled: false
+      ## e.g:
+      ## namespaceSelector:
+      ##   label: ingress
+      ##
+      namespaceSelector: {}
+      ## e.g:
+      ## podSelector:
+      ##   label: access
+      ##
+      podSelector: {}
+      ## custom ingress rules
+      ## e.g:
+      ## customRules:
+      ##   - from:
+      ##       - namespaceSelector:
+      ##           matchLabels:
+      ##             label: example
+      customRules: {}
+    ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled Enable ingress rule that makes the PostgreSQL read-only nodes only accessible from a particular origin.
+    ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed namespace(s).
+    ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed pod(s).
+    ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL read-only nodes.
+    ##
+    readReplicasAccessOnlyFrom:
+      enabled: false
+      ## e.g:
+      ## namespaceSelector:
+      ##   label: ingress
+      ##
+      namespaceSelector: {}
+      ## e.g:
+      ## podSelector:
+      ##   label: access
+      ##
+      podSelector: {}
+      ## custom ingress rules
+      ## e.g:
+      ## customRules:
+      ##   - from:
+      ##       - namespaceSelector:
+      ##           matchLabels:
+      ##             label: example
+      customRules: {}
+  ## @param networkPolicy.egressRules.denyConnectionsToExternal Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53).
+  ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule
+  ##
+  egressRules:
+    # Deny connections to external. This is not compatible with an external database.
+    denyConnectionsToExternal: false
+    ## Additional custom egress rules
+    ## e.g:
+    ## customRules:
+    ##   - to:
+    ##       - namespaceSelector:
+    ##           matchLabels:
+    ##             label: example
+    customRules: {}
+
+## @section Volume Permissions parameters
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
+##
+volumePermissions:
+  ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
+  ##
+  enabled: false
+  ## @param volumePermissions.image.registry Init container volume-permissions image registry
+  ## @param volumePermissions.image.repository Init container volume-permissions image repository
+  ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
+  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+  ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/bitnami-shell
+    tag: 10-debian-10-r400
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## Example:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Init container resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
+  ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Init container's Security Context
+  ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+  ## and not the below volumePermissions.containerSecurityContext.runAsUser
+  ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+  ##
+  containerSecurityContext:
+    runAsUser: 0
+
+## @section Other Parameters
+
+## Service account for PostgreSQL to use.
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+  ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod
+  ##
+  create: false
+  ## @param serviceAccount.name The name of the ServiceAccount to use.
+  ## If not set and create is true, a name is generated using the common.names.fullname template
+  ##
+  name: ""
+  ## @param serviceAccount.automountServiceAccountToken Allows automounting of the ServiceAccountToken on the created ServiceAccount
+  ## Can be set to false if pods using this serviceAccount do not need to use the K8s API
+  ##
+  automountServiceAccountToken: true
+  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+  ##
+  annotations: {}
+## Creates role for ServiceAccount
+## @param rbac.create Create Role and RoleBinding (required for PSP to work)
+##
+rbac:
+  create: false
+  ## @param rbac.rules Custom RBAC rules to set
+  ## e.g:
+  ## rules:
+  ##   - apiGroups:
+  ##       - ""
+  ##     resources:
+  ##       - pods
+  ##     verbs:
+  ##       - get
+  ##       - list
+  ##
+  rules: []
+## Pod Security Policy
+## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 and removed in v1.25
+##
+psp:
+  create: false
+
+## @section Metrics Parameters
+
+metrics:
+  ## @param metrics.enabled Start a Prometheus exporter
+  ##
+  enabled: false
+  ## @param metrics.image.registry PostgreSQL Prometheus Exporter image registry
+  ## @param metrics.image.repository PostgreSQL Prometheus Exporter image repository
+  ## @param metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended)
+  ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy
+  ## @param metrics.image.pullSecrets Specify image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/postgres-exporter
+    tag: 0.10.1-debian-10-r88
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.customMetrics Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + ## customMetrics: + ## pg_database: + ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + ## metrics: + ## - name: + ## usage: "LABEL" + ## description: "Name of the database" + ## - size_bytes: + ## usage: "GAUGE" + ## description: "Size of the database in bytes" + ## + customMetrics: {} + ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + ## extraEnvVars: + ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + ## value: "true" + ## + extraEnvVars: [] + ## PostgreSQL Prometheus exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.containerSecurityContext.enabled Enable PostgreSQL Prometheus exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set PostgreSQL Prometheus exporter containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsNonRoot Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter 
containers + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port + ## + containerPorts: + metrics: 9187 + ## PostgreSQL Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.resources.limits The resources limits for the PostgreSQL Prometheus exporter container + ## @param metrics.resources.requests The requested resources for the PostgreSQL Prometheus exporter container + ## + resources: + limits: {} + requests: {} + ## Service configuration + ## + service: + ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port + ## + ports: + metrics: 9187 + ## @param metrics.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. 
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    interval: ""
+    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    scrapeTimeout: ""
+    ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
+    ##
+    labels: {}
+    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    selector: {}
+    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
+    ##
+    relabelings: []
+    ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
+    ##
+    metricRelabelings: []
+    ## @param metrics.serviceMonitor.honorLabels Specify the honorLabels parameter for the scrape endpoint
+    ##
+    honorLabels: false
+    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus.
+    ##
+    jobLabel: ""
+  ## Custom PrometheusRule to be defined
+  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+  ##
+  prometheusRule:
+    ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator
+    ##
+    enabled: false
+    ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+    ##
+    namespace: ""
+    ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus
+    ##
+    labels: {}
+    ## @param metrics.prometheusRule.rules PrometheusRule definitions
+    ## Make sure to constrain the rules to the current PostgreSQL service.
+    ## rules:
+    ##   - alert: HugeReplicationLag
+    ##     expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "common.names.fullname" .) }}"} / 3600 > 1
+    ##     for: 1m
+    ##     labels:
+    ##       severity: critical
+    ##     annotations:
+    ##       description: replication for {{ include "common.names.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
+    ##       summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
+ ## + rules: [] diff --git a/ansible/roles/helm_install/files/keycloak/ci/ct-values.yaml b/ansible/roles/helm_install/files/keycloak/ci/ct-values.yaml new file mode 100644 index 0000000..b738e2a --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/ci/ct-values.yaml @@ -0,0 +1,2 @@ +service: + type: ClusterIP diff --git a/ansible/roles/helm_install/files/keycloak/ci/values-ha.yaml b/ansible/roles/helm_install/files/keycloak/ci/values-ha.yaml new file mode 100644 index 0000000..4d63174 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/ci/values-ha.yaml @@ -0,0 +1,8 @@ +serviceDiscovery: + enabled: true +cache: + ownersCount: 2 + authOwnersCount: 2 +replicaCount: 2 +rbac: + create: true diff --git a/ansible/roles/helm_install/files/keycloak/ci/values-hpa-pdb.yaml b/ansible/roles/helm_install/files/keycloak/ci/values-hpa-pdb.yaml new file mode 100644 index 0000000..d996388 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/ci/values-hpa-pdb.yaml @@ -0,0 +1,4 @@ +autoscaling: + enabled: true +pdb: + create: true diff --git a/ansible/roles/helm_install/files/keycloak/ci/values-init-scripts.yaml b/ansible/roles/helm_install/files/keycloak/ci/values-init-scripts.yaml new file mode 100644 index 0000000..c908973 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/ci/values-init-scripts.yaml @@ -0,0 +1,4 @@ +initdbScripts: + test.sh: | + #!/bin/sh + true diff --git a/ansible/roles/helm_install/files/keycloak/ci/values-metrics-and-ingress.yaml b/ansible/roles/helm_install/files/keycloak/ci/values-metrics-and-ingress.yaml new file mode 100644 index 0000000..45682c1 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/ci/values-metrics-and-ingress.yaml @@ -0,0 +1,9 @@ +ingress: + enabled: true + tls: true + +service: + type: ClusterIP + +metrics: + enabled: true diff --git a/ansible/roles/helm_install/files/keycloak/override-values.yaml b/ansible/roles/helm_install/files/keycloak/override-values.yaml new file mode 100644 index 0000000..c472034 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/override-values.yaml @@ -0,0 +1,60 @@ +auth: + adminUser: admin + adminPassword: "admin" + managementUser: admin + managementPassword: "admin" +proxyAddressForwarding: true +serviceDiscovery: + enabled: true +cache: + ownersCount: 2 + authOwnersCount: 2 +replicaCount: 2 +service: + type: NodePort + nodePorts: + http: "30100" + https: "30101" +rbac: + create: true + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list +postgresql: + enabled: false +externalDatabase: + host: "postgresql.dsk-middle.svc.cluster.local" + port: 5432 + user: root + database: keycloak + password: root +ingress: + enabled: true + ingressClassName: "nginx" + hostname: auth.dev.kr.datasaker.io + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + ingress.kubernetes.io/ssl-redirect: "true" + kubernetes.io/tls-acme: "true" + tls: + - hosts: + - auth.dev.kr.datasaker.io + secretName: cert-auth-dev-kr-datasaker +tolerations: +- key: "dev/data-druid" + operator: "Exists" +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/druid-size + operator: In + values: + - small + diff --git a/ansible/roles/helm_install/files/keycloak/templates/NOTES.txt b/ansible/roles/helm_install/files/keycloak/templates/NOTES.txt new file mode 100644 index 0000000..acb9e0b --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/NOTES.txt @@ -0,0 +1,76 @@ +CHART NAME: {{ 
.Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+** Please be patient while the chart is being deployed **
+
+Keycloak can be accessed through the following DNS name from within your cluster:
+
+    {{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} (port {{ coalesce .Values.service.ports.http .Values.service.port }})
+
+To access Keycloak from outside the cluster, execute the following commands:
+
+{{- if .Values.ingress.enabled }}
+
+1. Get the Keycloak URL and associate its hostname to your cluster external IP:
+
+   export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on other K8s clusters
+   echo "Keycloak URL: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/auth"
+   echo "$CLUSTER_IP  {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts
+
+{{- else }}
+
+1. Get the Keycloak URL by running these commands:
+
+{{- if contains "NodePort" .Values.service.type }}
+
+    export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.names.fullname" . }})
+    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    echo "http://${NODE_IP}:${NODE_PORT}/auth"
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "common.names.fullname" . }}'
+
+    export SERVICE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].port}" services {{ include "common.names.fullname" . }})
+    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+    echo "http://${SERVICE_IP}:${SERVICE_PORT}/auth"
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+    export SERVICE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].port}" services {{ include "common.names.fullname" . }})
+    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} ${SERVICE_PORT}:${SERVICE_PORT} &
+    echo "http://127.0.0.1:${SERVICE_PORT}/auth"
+
+{{- end }}
+{{- end }}
+
+2. Access Keycloak using the obtained URL.
+{{- if .Values.auth.createAdminUser }}
+3. Access the Administration Console using the following credentials:
+
+  echo Username: {{ .Values.auth.adminUser }}
+{{- if not .Values.auth.existingSecretPerPassword }}
+  echo Password: $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "common.secrets.name" (dict "existingSecret" .Values.auth.existingSecret "context" $) }} -o jsonpath="{.data.admin-password}" | base64 --decode)
+{{- else }}
+  echo Password: $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "common.secrets.name" (dict "existingSecret" .Values.auth.existingSecretPerPassword.adminPassword "context" $) }} -o jsonpath="\{ {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecretPerPassword "key" "adminPassword") }} \}" | base64 --decode)
+{{- end }}
+{{- end }}
+{{- if .Values.metrics.enabled }}
+
+You can access the Prometheus metrics following the steps below:
+
+1.
Get the Keycloak Prometheus metrics URL by running: + + {{- $metricsPort := coalesce .Values.metrics.service.ports.http .Values.metrics.service.port | toString }} + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ printf "%s-metrics" (include "keycloak.fullname" .) }} {{ $metricsPort }}:{{ $metricsPort }} & + echo "Keycloak Prometheus metrics URL: http://127.0.0.1:{{ $metricsPort }}/metrics" + +2. Open a browser and access Keycloak Prometheus metrics using the obtained URL. + +{{- end }} + +{{- include "keycloak.validateValues" . }} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.keycloakConfigCli.image }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/_helpers.tpl b/ansible/roles/helm_install/files/keycloak/templates/_helpers.tpl new file mode 100644 index 0000000..7e26914 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/_helpers.tpl @@ -0,0 +1,273 @@ +{{/* +Create a default fully qualified app name. +We truncate at 20 chars since the node identifier in WildFly is limited to +23 characters. This allows for a replica suffix for up to 99 replicas. +If release name contains chart name it will be used as a full name. +*/}} +{{- define "keycloak.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 20 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 20 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 20 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Keycloak image name +*/}} +{{- define "keycloak.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper keycloak-config-cli image name +*/}} +{{- define "keycloak.keycloakConfigCli.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.keycloakConfigCli.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the keycloak-config-cli configuration configmap. +*/}} +{{- define "keycloak.keycloakConfigCli.configmapName" -}} +{{- if .Values.keycloakConfigCli.existingConfigmap -}} + {{- printf "%s" (tpl .Values.keycloakConfigCli.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-keycloak-config-cli-configmap" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for keycloak-config-cli +*/}} +{{- define "keycloak.keycloakConfigCli.createConfigmap" -}} +{{- if and .Values.keycloakConfigCli.enabled .Values.keycloakConfigCli.configuration (not .Values.keycloakConfigCli.existingConfigmap) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "keycloak.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.keycloakConfigCli.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "keycloak.postgresql.fullname" -}} +{{- include "common.names.dependency.fullname" (dict "chartName" "postgresql" "chartValues" .Values.postgresql "context" $) -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "keycloak.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "keycloak.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the Keycloak configuration configmap +*/}} +{{- define "keycloak.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-configuration" (include "keycloak.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created +*/}} +{{- define "keycloak.createConfigmap" -}} +{{- if and .Values.configuration (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Database hostname +*/}} +{{- define "keycloak.databaseHost" -}} +{{- ternary (include "keycloak.postgresql.fullname" .) .Values.externalDatabase.host .Values.postgresql.enabled -}} +{{- end -}} + +{{/* +Return the Database port +*/}} +{{- define "keycloak.databasePort" -}} +{{- ternary "5432" .Values.externalDatabase.port .Values.postgresql.enabled | quote -}} +{{- end -}} + +{{/* +Return the Database database name +*/}} +{{- define "keycloak.databaseName" -}} +{{- if .Values.postgresql.enabled }} + {{- if .Values.global.postgresql }} + {{- if .Values.global.postgresql.auth }} + {{- coalesce .Values.global.postgresql.auth.database .Values.postgresql.auth.database -}} + {{- else -}} + {{- .Values.postgresql.auth.database -}} + {{- end -}} + {{- else -}} + {{- .Values.postgresql.auth.database -}} + {{- end -}} +{{- else -}} + {{- .Values.externalDatabase.database -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Database user +*/}} +{{- define "keycloak.databaseUser" -}} +{{- if .Values.postgresql.enabled }} + {{- if .Values.global.postgresql }} + {{- if .Values.global.postgresql.auth }} + {{- coalesce .Values.global.postgresql.auth.username .Values.postgresql.auth.username -}} + {{- else -}} + {{- .Values.postgresql.auth.username -}} + {{- end -}} + {{- else -}} + {{- .Values.postgresql.auth.username -}} + {{- end -}} +{{- else -}} + {{- .Values.externalDatabase.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Database encrypted password +*/}} +{{- define "keycloak.databaseSecretName" -}} +{{- if .Values.postgresql.enabled }} + {{- if .Values.global.postgresql }} + {{- if .Values.global.postgresql.auth }} + {{- if .Values.global.postgresql.auth.existingSecret }} + {{- tpl .Values.global.postgresql.auth.existingSecret $ -}} + {{- else -}} + {{- default (include "keycloak.postgresql.fullname" .) (tpl .Values.postgresql.auth.existingSecret $) -}} + {{- end -}} + {{- else -}} + {{- default (include "keycloak.postgresql.fullname" .) (tpl .Values.postgresql.auth.existingSecret $) -}} + {{- end -}} + {{- else -}} + {{- default (include "keycloak.postgresql.fullname" .) 
(tpl .Values.postgresql.auth.existingSecret $) -}}
+    {{- end -}}
+{{- else -}}
+    {{- default (include "common.secrets.name" (dict "existingSecret" .Values.auth.existingSecret "context" $)) (tpl .Values.externalDatabase.existingSecret $) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the key inside the database secret that contains the password
+*/}}
+{{- define "keycloak.databaseSecretKey" -}}
+{{- if .Values.postgresql.enabled -}}
+    {{- print "password" -}}
+{{- else -}}
+    {{- if .Values.externalDatabase.existingSecret -}}
+        {{- if .Values.externalDatabase.existingSecretPasswordKey -}}
+            {{- printf "%s" .Values.externalDatabase.existingSecretPasswordKey -}}
+        {{- else -}}
+            {{- print "password" -}}
+        {{- end -}}
+    {{- else -}}
+        {{- print "password" -}}
+    {{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the Keycloak initdb scripts configmap
+*/}}
+{{- define "keycloak.initdbScriptsCM" -}}
+{{- if .Values.initdbScriptsConfigMap -}}
+    {{- printf "%s" .Values.initdbScriptsConfigMap -}}
+{{- else -}}
+    {{- printf "%s-init-scripts" (include "keycloak.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the secret containing Keycloak TLS certificates
+*/}}
+{{- define "keycloak.tlsSecretName" -}}
+{{- $secretName := coalesce .Values.auth.tls.existingSecret .Values.auth.tls.jksSecret -}}
+{{- if $secretName -}}
+    {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+    {{- printf "%s-crt" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS secret object should be created
+*/}}
+{{- define "keycloak.createTlsSecret" -}}
+{{- if and .Values.auth.tls.enabled .Values.auth.tls.autoGenerated (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.jksSecret) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message.
+*/}}
+{{- define "keycloak.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "keycloak.validateValues.replicaCount" .) -}}
+{{- $messages := append $messages (include "keycloak.validateValues.database" .) -}}
+{{- $messages := append $messages (include "keycloak.validateValues.auth.tls" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Keycloak - number of replicas */}}
+{{- define "keycloak.validateValues.replicaCount" -}}
+{{- $replicaCount := int .Values.replicaCount }}
+{{- if and (not .Values.serviceDiscovery.enabled) (gt $replicaCount 1) -}}
+keycloak: replicaCount
+  You need to configure the ServiceDiscovery settings to run more than 1 replica.
+  Enable the Service Discovery (--set serviceDiscovery.enabled=true) and
+  set the Service Discovery protocol (--set serviceDiscovery.protocol="FOO") and
+  the Service Discovery properties (--set serviceDiscovery.properties[0]="BAR") if needed.
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Keycloak - database */}}
+{{- define "keycloak.validateValues.database" -}}
+{{- if and (not .Values.postgresql.enabled) (not .Values.externalDatabase.host) (not .Values.externalDatabase.existingSecret) -}}
+keycloak: database
+  You disabled the PostgreSQL sub-chart but did not specify an external PostgreSQL host.
+ Either deploy the PostgreSQL sub-chart (--set postgresql.enabled=true), + or set a value for the external database host (--set externalDatabase.host=FOO) + or set a value for the external database existing secret (--set externalDatabase.existingSecret=BAR). +{{- end -}} +{{- end -}} + +{{/* Validate values of Keycloak - Auth TLS enabled */}} +{{- define "keycloak.validateValues.auth.tls" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.autoGenerated) (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.jksSecret) }} +keycloak: auth.tls.enabled + In order to enable TLS, you also need to provide + an existing secret containing the Keystore and Truststore or + enable auto-generated certificates. +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/keycloak/templates/configmap-env-vars.yaml b/ansible/roles/helm_install/files/keycloak/templates/configmap-env-vars.yaml new file mode 100644 index 0000000..4a26314 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/configmap-env-vars.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-env-vars" (include "keycloak.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + KEYCLOAK_CREATE_ADMIN_USER: {{ ternary "true" "false" .Values.auth.createAdminUser | quote }} + KEYCLOAK_ADMIN_USER: {{ .Values.auth.adminUser | quote }} + KEYCLOAK_MANAGEMENT_USER: {{ .Values.auth.managementUser | quote }} + KEYCLOAK_HTTP_PORT: {{ .Values.containerPorts.http | quote }} + KEYCLOAK_PROXY_ADDRESS_FORWARDING: {{ ternary "true" "false" .Values.proxyAddressForwarding | quote }} + KEYCLOAK_ENABLE_STATISTICS: {{ ternary "true" "false" .Values.metrics.enabled | quote }} + KEYCLOAK_DATABASE_HOST: {{ include "keycloak.databaseHost" . | quote }} + KEYCLOAK_DATABASE_PORT: {{ include "keycloak.databasePort" . }} + KEYCLOAK_DATABASE_NAME: {{ include "keycloak.databaseName" . | quote }} + KEYCLOAK_DATABASE_USER: {{ include "keycloak.databaseUser" . 
| quote }} + {{- if .Values.serviceDiscovery.enabled }} + KEYCLOAK_JGROUPS_DISCOVERY_PROTOCOL: {{ .Values.serviceDiscovery.protocol | quote }} + KEYCLOAK_JGROUPS_DISCOVERY_PROPERTIES: {{ (tpl (join "," .Values.serviceDiscovery.properties) $) | quote }} + KEYCLOAK_JGROUPS_TRANSPORT_STACK: {{ .Values.serviceDiscovery.transportStack | quote }} + {{- end }} + KEYCLOAK_CACHE_OWNERS_COUNT: {{ .Values.cache.ownersCount | quote }} + KEYCLOAK_AUTH_CACHE_OWNERS_COUNT: {{ .Values.cache.authOwnersCount | quote }} + KEYCLOAK_ENABLE_TLS: {{ ternary "true" "false" .Values.auth.tls.enabled | quote }} + {{- if .Values.auth.tls.enabled }} + KEYCLOAK_HTTPS_PORT: {{ .Values.containerPorts.https | quote }} + KEYCLOAK_TLS_KEYSTORE_FILE: "/opt/bitnami/keycloak/certs/keycloak.keystore.jks" + KEYCLOAK_TLS_TRUSTSTORE_FILE: "/opt/bitnami/keycloak/certs/keycloak.truststore.jks" + {{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/configmap.yaml b/ansible/roles/helm_install/files/keycloak/templates/configmap.yaml new file mode 100644 index 0000000..393d645 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "keycloak.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "keycloak.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + standalone-ha.xml: |- + {{- .Values.configuration | nindent 4 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/extra-list.yaml b/ansible/roles/helm_install/files/keycloak/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/headless-service.yaml b/ansible/roles/helm_install/files/keycloak/templates/headless-service.yaml new file mode 100644 index 0000000..1130100 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/headless-service.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "keycloak.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http + port: {{ coalesce .Values.service.ports.http .Values.service.port }} + protocol: TCP + targetPort: http + {{- if .Values.auth.tls.enabled }} + - name: https + port: {{ coalesce .Values.service.ports.https .Values.service.httpsPort }} + protocol: TCP + targetPort: https + {{- end }} + publishNotReadyAddresses: true + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: keycloak diff --git a/ansible/roles/helm_install/files/keycloak/templates/hpa.yaml b/ansible/roles/helm_install/files/keycloak/templates/hpa.yaml new file mode 100644 index 0000000..7a09b44 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/hpa.yaml @@ -0,0 +1,35 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "keycloak.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ template "keycloak.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPU }} + {{- end }} + {{- if .Values.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemory }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/ingress.yaml b/ansible/roles/helm_install/files/keycloak/templates/ingress.yaml new file mode 100644 index 0000000..d2bd407 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/ingress.yaml @@ -0,0 +1,58 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "keycloak.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.ingress.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ .Values.ingress.hostname | quote }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "keycloak.fullname" .) "servicePort" .Values.ingress.servicePort "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "keycloak.fullname" $) "servicePort" $.Values.ingress.servicePort "context" $) | nindent 14 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/init-scripts-configmap.yaml b/ansible/roles/helm_install/files/keycloak/templates/init-scripts-configmap.yaml new file mode 100644 index 0000000..99687dc --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/init-scripts-configmap.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.initdbScripts (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "keycloak.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) 
| nindent 2 }} +{{ end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/keycloak-config-cli-configmap.yaml b/ansible/roles/helm_install/files/keycloak/templates/keycloak-config-cli-configmap.yaml new file mode 100644 index 0000000..9a6cfd8 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/keycloak-config-cli-configmap.yaml @@ -0,0 +1,21 @@ +{{- if (include "keycloak.keycloakConfigCli.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "keycloak.keycloakConfigCli.configmapName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: keycloak-config-cli + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- range $fileName, $fileContent := .Values.keycloakConfigCli.configuration }} + {{- if $fileContent }} + {{ $fileName }}: | + {{- include "common.tplvalues.render" (dict "value" $fileContent "context" $) | nindent 4 }} + {{- else }} + {{- ($.Files.Glob $fileName).AsConfig | nindent 2 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/keycloak-config-cli-job.yaml b/ansible/roles/helm_install/files/keycloak/templates/keycloak-config-cli-job.yaml new file mode 100644 index 0000000..17807e1 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/keycloak-config-cli-job.yaml @@ -0,0 +1,120 @@ +{{- if .Values.keycloakConfigCli.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "keycloak.fullname" . }}-keycloak-config-cli + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: keycloak-config-cli + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- include "common.tplvalues.render" ( dict "value" .Values.keycloakConfigCli.annotations "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + backoffLimit: {{ .Values.keycloakConfigCli.backoffLimit }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: keycloak-config-cli + {{- if .Values.keycloakConfigCli.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "keycloak.keycloakConfigCli.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/keycloak-config-cli-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.keycloakConfigCli.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "keycloak.serviceAccountName" . }} + {{- include "keycloak.imagePullSecrets" . 
| nindent 6 }} + restartPolicy: Never + {{- if .Values.keycloakConfigCli.podSecurityContext.enabled }} + securityContext: {{- omit .Values.keycloakConfigCli.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.keycloakConfigCli.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.hostAliases "context" $) | nindent 8 }} + {{- end }} + containers: + - name: keycloak-config-cli + image: {{ template "keycloak.keycloakConfigCli.image" . }} + imagePullPolicy: {{ .Values.keycloakConfigCli.image.pullPolicy }} + {{- if .Values.keycloakConfigCli.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.command "context" $) | nindent 12 }} + {{- else }} + command: + - java + - -jar + - {{ printf "/opt/bitnami/keycloak-config-cli/keycloak-config-cli-%s.jar" .Chart.AppVersion }} + {{- end }} + {{- if .Values.keycloakConfigCli.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.keycloakConfigCli.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.keycloakConfigCli.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + - name: KEYCLOAK_URL + value: {{ printf "http://%s-headless:%d/auth" (include "keycloak.fullname" .) (.Values.containerPorts.http | int) }} + - name: KEYCLOAK_USER + value: {{ .Values.auth.adminUser | quote }} + - name: KEYCLOAK_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.auth.existingSecretPerPassword }} + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.auth.existingSecretPerPassword.adminPassword "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecretPerPassword "key" "adminPassword") }} + {{- else }} + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.auth.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecret "key" "admin-password") }} + {{- end }} + {{- if or .Values.keycloakConfigCli.configuration .Values.keycloakConfigCli.existingConfigmap }} + - name: IMPORT_PATH + value: /config/ + {{- end }} + - name: KEYCLOAK_AVAILABILITYCHECK_ENABLED + value: "true" + {{- if .Values.keycloakConfigCli.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.keycloakConfigCli.extraEnvVarsCM .Values.keycloakConfigCli.extraEnvVarsSecret }} + envFrom: + {{- if .Values.keycloakConfigCli.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.keycloakConfigCli.extraEnvVarsCM }} + {{- end }} + {{- if .Values.keycloakConfigCli.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.keycloakConfigCli.extraEnvVarsSecret }} + {{- end }} + {{- end }} + {{- if or .Values.keycloakConfigCli.configuration .Values.keycloakConfigCli.existingConfigmap .Values.keycloakConfigCli.extraVolumeMounts }} + volumeMounts: + {{- if or .Values.keycloakConfigCli.configuration .Values.keycloakConfigCli.existingConfigmap }} + - name: config-volume + mountPath: /config + {{- end }} + {{- if .Values.keycloakConfigCli.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.keycloakConfigCli.resources }} + resources: {{- toYaml 
.Values.keycloakConfigCli.resources | nindent 12 }} + {{- end }} + {{- if or .Values.keycloakConfigCli.configuration .Values.keycloakConfigCli.existingConfigmap .Values.keycloakConfigCli.extraVolumes }} + volumes: + {{- if or .Values.keycloakConfigCli.configuration .Values.keycloakConfigCli.existingConfigmap }} + - name: config-volume + configMap: + name: {{ include "keycloak.keycloakConfigCli.configmapName" . }} + {{- end }} + {{- if .Values.keycloakConfigCli.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/metrics-service.yaml b/ansible/roles/helm_install/files/keycloak/templates/metrics-service.yaml new file mode 100644 index 0000000..fa31d6b --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/metrics-service.yaml @@ -0,0 +1,30 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "keycloak.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + ports: + - name: http-management + port: {{ coalesce .Values.metrics.service.ports.http .Values.metrics.service.port }} + protocol: TCP + targetPort: http-management + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: keycloak +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/networkpolicy.yaml b/ansible/roles/helm_install/files/keycloak/templates/networkpolicy.yaml new file mode 100644 index 0000000..0f6d77f --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/networkpolicy.yaml @@ -0,0 +1,39 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ template "keycloak.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + ingress: + - ports: + - port: {{ .Values.containerPorts.http }} + {{- if .Values.auth.tls.enabled }} + - port: {{ .Values.containerPorts.https }} + {{- end }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.containerPorts.management }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "keycloak.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} + app.kubernetes.io/component: keycloak + {{- if .Values.networkPolicy.additionalRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.additionalRules "context" $) | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/pdb.yaml b/ansible/roles/helm_install/files/keycloak/templates/pdb.yaml new file mode 100644 index 0000000..2aad4f4 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/pdb.yaml @@ -0,0 +1,25 @@ +{{- if .Values.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "keycloak.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: keycloak +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/role.yaml b/ansible/roles/helm_install/files/keycloak/templates/role.yaml new file mode 100644 index 0000000..08d08b0 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/role.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "keycloak.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if .Values.rbac.rules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} + {{- end }} + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/rolebinding.yaml b/ansible/roles/helm_install/files/keycloak/templates/rolebinding.yaml new file mode 100644 index 0000000..6e693ab --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "keycloak.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "keycloak.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "keycloak.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/secrets.yaml b/ansible/roles/helm_install/files/keycloak/templates/secrets.yaml new file mode 100644 index 0000000..0ffc192 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/secrets.yaml @@ -0,0 +1,31 @@ +{{- if and (not .Values.auth.existingSecret) (not .Values.auth.existingSecretPerPassword) }} +{{- $secretName := include "common.secrets.name" (dict "existingSecret" .Values.auth.existingSecret "context" $)}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + admin-password: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "admin-password" "length" 10 "providedValues" (list "auth.adminPassword") "context" $) }} + management-password: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "management-password" "providedValues" (list "auth.managementPassword") "context" $) }} + {{- if and (not .Values.postgresql.enabled) (not .Values.externalDatabase.existingSecret) }} + password: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "password" "length" 10 "providedValues" (list "externalDatabase.password") "context" $) }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + {{- if or .Values.auth.tls.keystorePassword .Values.auth.tls.autoGenerated .Values.auth.tls.usePem }} + tls-keystore-password: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "tls-keystore-password" "length" 10 "providedValues" (list "auth.tls.keystorePassword") "context" $) }} + {{- end }} + {{- if or .Values.auth.tls.truststorePassword .Values.auth.tls.autoGenerated .Values.auth.tls.usePem }} + tls-truestore-password: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "tls-truestore-password" "length" 10 "providedValues" (list "auth.tls.truststorePassword") "context" $) }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/service.yaml b/ansible/roles/helm_install/files/keycloak/templates/service.yaml new file mode 100644 index 0000000..f6230b7 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/service.yaml @@ -0,0 +1,59 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "keycloak.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if eq .Values.service.type "LoadBalancer" }} + loadBalancerSourceRanges: + {{ toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- if not (empty .Values.service.loadBalancerIP) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- end }} + ports: + - name: http + port: {{ coalesce .Values.service.ports.http .Values.service.port }} + protocol: TCP + targetPort: http + {{- if (and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.http))) }} + nodePort: {{ .Values.service.nodePorts.http }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + - name: https + port: {{ coalesce .Values.service.ports.https .Values.service.httpsPort }} + protocol: TCP + targetPort: https + {{- if (and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.https))) }} + nodePort: {{ .Values.service.nodePorts.https }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: keycloak diff --git a/ansible/roles/helm_install/files/keycloak/templates/serviceaccount.yaml b/ansible/roles/helm_install/files/keycloak/templates/serviceaccount.yaml new file mode 100644 index 0000000..f8cff00 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/serviceaccount.yaml @@ -0,0 +1,22 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "keycloak.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.serviceAccount.annotations "context" $) | nindent 4 }} + {{- end }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/servicemonitor.yaml b/ansible/roles/helm_install/files/keycloak/templates/servicemonitor.yaml new file mode 100644 index 0000000..b7040a3 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "keycloak.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.labels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + endpoints: + - port: http-management + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/statefulset.yaml b/ansible/roles/helm_install/files/keycloak/templates/statefulset.yaml new file mode 100644 index 0000000..762c5a7 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/statefulset.yaml @@ -0,0 +1,361 @@ +{{- $globalSecretName := printf "%s" (tpl (include "common.secrets.name" (dict "existingSecret" .Values.auth.existingSecret "context" $)) $) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "keycloak.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + serviceName: {{ printf "%s-headless" (include "keycloak.fullname" .) | trunc 63 | trimSuffix "-" }} + updateStrategy: + {{- include "common.tplvalues.render" (dict "value" .Values.updateStrategy "context" $ ) | nindent 4 }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: keycloak + template: + metadata: + annotations: + checksum/configmap-env-vars: {{ include (print $.Template.BasePath "/configmap-env-vars.yaml") . | sha256sum }} + {{- if and (not .Values.auth.existingSecret) (not .Values.auth.existingSecretPerPassword) }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if (include "keycloak.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: keycloak + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "keycloak.serviceAccountName" . }} + {{- include "keycloak.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if or .Values.initContainers .Values.auth.tls.enabled }} + initContainers: + {{- if .Values.auth.tls.enabled }} + {{- $fullname := include "keycloak.fullname" . }} + - name: init-certs + image: {{ include "keycloak.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -ec + - |- + ID="${MY_POD_NAME#"{{ $fullname }}-"}" + {{- if or .Values.auth.tls.autoGenerated .Values.auth.tls.usePem }} + if [[ -f "/certs/keycloak-${ID}.key" ]] && [[ -f "/certs/keycloak-${ID}.crt" ]] && [[ -f "/certs/ca.crt" ]]; then + openssl pkcs12 -export -in "/certs/keycloak-${ID}.crt" \ + -passout pass:"${KEYCLOAK_TLS_KEYSTORE_PASSWORD}" \ + -inkey "/certs/keycloak-${ID}.key" \ + -out "/tmp/keystore.p12" + keytool -importkeystore -srckeystore "/tmp/keystore.p12" \ + -srcstoretype PKCS12 \ + -srcstorepass "${KEYCLOAK_TLS_KEYSTORE_PASSWORD}" \ + -deststorepass "${KEYCLOAK_TLS_KEYSTORE_PASSWORD}" \ + -destkeystore "/opt/bitnami/keycloak/certs/keycloak.keystore.jks" + rm "/tmp/keystore.p12" + keytool -import -file "/certs/ca.crt" \ + -keystore "/opt/bitnami/keycloak/certs/keycloak.truststore.jks" \ + -storepass "${KEYCLOAK_TLS_TRUSTSTORE_PASSWORD}" \ + -noprompt + else + echo "Couldn't find the expected PEM certificates! They are mandatory when encryption via TLS is enabled." 
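+                # TLS is enabled but no usable PEM material was mounted, so fail the init container early.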
+                exit 1
+              fi
+              {{- else if and .Values.auth.tls.truststoreFilename .Values.auth.tls.keystoreFilename }}
+              # Use the pre-built Java keystores shipped in the TLS secret under the configured file names.
+              if [[ -f {{ printf "/certs/%s" .Values.auth.tls.truststoreFilename | quote }} ]] && [[ -f {{ printf "/certs/%s" .Values.auth.tls.keystoreFilename | quote }} ]]; then
+                  cp {{ printf "/certs/%s" .Values.auth.tls.truststoreFilename | quote }} "/opt/bitnami/keycloak/certs/keycloak.truststore.jks"
+                  cp {{ printf "/certs/%s" .Values.auth.tls.keystoreFilename | quote }} "/opt/bitnami/keycloak/certs/keycloak.keystore.jks"
+              else
+                  echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when encryption via TLS is enabled."
+                  exit 1
+              fi
+              {{- else }}
+              # Default layout: one keystore per replica plus a shared truststore, as created by the chart's TLS secret.
+              if [[ -f "/certs/keycloak.truststore.jks" ]] && [[ -f "/certs/keycloak-${ID}.keystore.jks" ]]; then
+                  cp "/certs/keycloak.truststore.jks" "/opt/bitnami/keycloak/certs/keycloak.truststore.jks"
+                  cp "/certs/keycloak-${ID}.keystore.jks" "/opt/bitnami/keycloak/certs/keycloak.keystore.jks"
+              else
+                  echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when encryption via TLS is enabled."
+                  exit 1
+              fi
+              {{- end }}
+          env:
+            - name: MY_POD_NAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.name
+            {{- if or .Values.auth.tls.keystorePassword .Values.auth.existingSecretPerPassword .Values.auth.tls.autoGenerated }}
+            - name: KEYCLOAK_TLS_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  {{- if .Values.auth.existingSecretPerPassword }}
+                  name: {{ tpl (include "common.secrets.name" (dict "existingSecret" .Values.auth.existingSecretPerPassword.tlsKeystorePassword "context" $)) $ }}
+                  key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecretPerPassword "key" "tlsKeystorePassword") }}
+                  {{- else }}
+                  name: {{ $globalSecretName }}
+                  key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecret "key" "tls-keystore-password") }}
+                  {{- end }}
+            {{- end }}
+            {{- if or .Values.auth.tls.truststorePassword .Values.auth.existingSecretPerPassword .Values.auth.tls.autoGenerated }}
+            - name: KEYCLOAK_TLS_TRUSTSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  {{- if .Values.auth.existingSecretPerPassword }}
+                  name: {{ tpl (include "common.secrets.name" (dict "existingSecret" .Values.auth.existingSecretPerPassword.tlsTruststorePassword "context" $)) $ }}
+                  key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecretPerPassword "key" "tlsTruststorePassword") }}
+                  {{- else }}
+                  name: {{ $globalSecretName }}
+                  key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecret "key" "tls-truestore-password") }}
+                  {{- end }}
+            {{- end }}
+          {{- if .Values.auth.tls.resources }}
+          resources: {{- toYaml .Values.auth.tls.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: certificates
+              mountPath: /certs
+            - name: shared-certs
+              mountPath: /opt/bitnami/keycloak/certs
+        {{- end }}
+        {{- if .Values.initContainers }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }}
+        {{- end }}
+      {{- end }}
+      containers:
+        - name: keycloak
+          image: {{ template "keycloak.image" .
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: KEYCLOAK_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.auth.existingSecretPerPassword }} + name: {{ tpl (include "common.secrets.name" (dict "existingSecret" .Values.auth.existingSecretPerPassword.adminPassword "context" $)) $ }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecretPerPassword "key" "adminPassword") }} + {{- else }} + name: {{ $globalSecretName }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecret "key" "admin-password") }} + {{- end }} + - name: KEYCLOAK_MANAGEMENT_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.auth.existingSecretPerPassword }} + name: {{ tpl (include "common.secrets.name" (dict "existingSecret" .Values.auth.existingSecretPerPassword.managementPassword "context" $)) $ }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecretPerPassword "key" "managementPassword") }} + {{- else }} + name: {{ $globalSecretName }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecret "key" "management-password") }} + {{- end }} + - name: KEYCLOAK_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.auth.existingSecretPerPassword }} + name: {{ tpl (include "common.secrets.name" (dict "existingSecret" .Values.auth.existingSecretPerPassword.databasePassword "context" $)) $ }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecretPerPassword "key" "databasePassword") }} + {{- else }} + name: {{ include "keycloak.databaseSecretName" . }} + key: {{ include "keycloak.databaseSecretKey" . 
}}
+                  {{- end }}
+            {{- if .Values.auth.tls.enabled }}
+            {{- if or .Values.auth.tls.keystorePassword .Values.auth.existingSecretPerPassword }}
+            - name: KEYCLOAK_TLS_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  {{- if .Values.auth.existingSecretPerPassword }}
+                  name: {{ tpl (include "common.secrets.name" (dict "existingSecret" .Values.auth.existingSecretPerPassword.tlsKeystorePassword "context" $)) $ }}
+                  key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecretPerPassword "key" "tlsKeystorePassword") }}
+                  {{- else }}
+                  name: {{ $globalSecretName }}
+                  key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecret "key" "tls-keystore-password") }}
+                  {{- end }}
+            {{- end }}
+            {{- if or .Values.auth.tls.truststorePassword .Values.auth.existingSecretPerPassword }}
+            - name: KEYCLOAK_TLS_TRUSTSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  {{- if .Values.auth.existingSecretPerPassword }}
+                  name: {{ tpl (include "common.secrets.name" (dict "existingSecret" .Values.auth.existingSecretPerPassword.tlsTruststorePassword "context" $)) $ }}
+                  key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecretPerPassword "key" "tlsTruststorePassword") }}
+                  {{- else }}
+                  name: {{ $globalSecretName }}
+                  key: {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecret "key" "tls-truestore-password") }}
+                  {{- end }}
+            {{- end }}
+            {{- end }}
+            {{- if .Values.extraStartupArgs }}
+            - name: KEYCLOAK_EXTRA_ARGS
+              value: {{ .Values.extraStartupArgs | quote }}
+            {{- end }}
+            {{- if .Values.extraEnvVars }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }}
+            {{- end }}
+          envFrom:
+            - configMapRef:
+                name: {{ printf "%s-env-vars" (include "keycloak.fullname" .
}} + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if and .Values.externalDatabase.existingSecret (not .Values.postgresql.enabled) }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.externalDatabase.existingSecret "context" $) }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.containerPorts.http }} + protocol: TCP + - name: https + containerPort: {{ .Values.containerPorts.https }} + protocol: TCP + - name: http-management + containerPort: {{ .Values.containerPorts.management }} + protocol: TCP + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.startupProbe.enabled }} + startupProbe: {{- omit .Values.startupProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /auth/ + port: http + {{- else if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.livenessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /auth/ + port: http + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.readinessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /auth/realms/master + port: http + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + volumeMounts: + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: keycloak-config + mountPath: /bitnami/keycloak/configuration/standalone-ha.xml + subPath: standalone-ha.xml + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: shared-certs + mountPath: /opt/bitnami/keycloak/certs + readOnly: true + {{- end }} + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: keycloak-config + configMap: + name: {{ include "keycloak.configmapName" . }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: certificates + secret: + secretName: {{ include "keycloak.tlsSecretName" . }} + defaultMode: 420 + - name: shared-certs + emptyDir: {} + {{- end }} + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ include "keycloak.initdbScriptsCM" . 
}} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} diff --git a/ansible/roles/helm_install/files/keycloak/templates/tls-secret.yaml b/ansible/roles/helm_install/files/keycloak/templates/tls-secret.yaml new file mode 100644 index 0000000..b1d4a2d --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/templates/tls-secret.yaml @@ -0,0 +1,76 @@ +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ $.Release.Namespace }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $ca := genCA "keycloak-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-tls" .Values.ingress.hostname }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ $cert.Cert | b64enc | quote }} + tls.key: {{ $cert.Key | b64enc | quote }} + ca.crt: {{ $ca.Cert | b64enc | quote }} +--- +{{- end }} +{{- end }} +{{- if (include "keycloak.createTlsSecret" $) }} +{{- $ca := genCA "keycloak-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "keycloak.fullname" . }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-crt" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: keycloak + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- $replicaCount := int .Values.replicaCount }} + {{- range $i := until $replicaCount }} + {{- $replicaHost := printf "%s-%d.%s-headless" $fullname $i $fullname }} + {{- $altNames := list (printf "%s.%s.svc.%s" $replicaHost $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s" $replicaHost $releaseNamespace) (printf "%s.%s" $fullname $releaseNamespace) $replicaHost $fullname }} + {{- $crt := genSignedCert $replicaHost nil $altNames 365 $ca }} + keycloak-{{ $i }}.crt: {{ $crt.Cert | b64enc | quote }} + keycloak-{{ $i }}.key: {{ $crt.Key | b64enc | quote }} + {{- end }} + ca.crt: {{ $ca.Cert | b64enc | quote }} +{{- end }} + diff --git a/ansible/roles/helm_install/files/keycloak/values.yaml b/ansible/roles/helm_install/files/keycloak/values.yaml new file mode 100644 index 0000000..a192126 --- /dev/null +++ b/ansible/roles/helm_install/files/keycloak/values.yaml @@ -0,0 +1,951 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters +## + +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" +## @param nameOverride String to partially override keycloak.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override keycloak.fullname +## +fullnameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param clusterDomain Default Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + +## @section Keycloak parameters + +## Bitnami Keycloak image version +## ref: https://hub.docker.com/r/bitnami/keycloak/tags/ +## @param image.registry Keycloak image registry +## @param image.repository Keycloak image repository +## @param image.tag Keycloak image tag (immutable tags are recommended) +## @param image.pullPolicy Keycloak image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug logs should be enabled +## +image: + registry: docker.io + repository: bitnami/keycloak + tag: 16.1.1-debian-10-r85 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## Keycloak authentication parameters +## ref: https://github.com/bitnami/bitnami-docker-keycloak#admin-credentials +## +auth: + ## @param auth.createAdminUser Create administrator user on boot + ## + createAdminUser: true + ## @param auth.adminUser Keycloak administrator user + ## + adminUser: user + ## @param auth.adminPassword Keycloak administrator password for the new user + ## + adminPassword: "" + ## @param auth.managementUser Wildfly management user + ## + managementUser: manager + ## @param auth.managementPassword Wildfly management password + ## + managementPassword: "" + ## @param auth.existingSecret An already existing secret containing auth info + ## e.g: + ## existingSecret: + ## name: mySecret + ## keyMapping: + ## admin-password: myPasswordKey + ## management-password: myManagementPasswordKey + ## tls-keystore-password: myTlsKeystorePasswordKey + ## tls-truestore-password: myTlsTruestorePasswordKey + ## + existingSecret: "" + ## @param auth.existingSecretPerPassword Override `existingSecret` and other secret values + ## e.g: + ## existingSecretPerPassword: + ## keyMapping: + ## adminPassword: KEYCLOAK_ADMIN_PASSWORD + ## managementPassword: KEYCLOAK_MANAGEMENT_PASSWORD + ## databasePassword: password + ## tlsKeystorePassword: JKS_KEYSTORE_TRUSTSTORE_PASSWORD + ## tlsTruststorePassword: JKS_KEYSTORE_TRUSTSTORE_PASSWORD + ## adminPassword: + ## name: keycloak-test2.credentials ## release-name + ## managementPassword: + ## name: keycloak-test2.credentials + ## databasePassword: + ## name: keycloak.pocwatt-keycloak-cluster.credentials + ## tlsKeystorePassword: + ## name: keycloak-test2.credentials + ## tlsTruststorePassword: + ## name: keycloak-test2.credentials + ## + existingSecretPerPassword: {} + ## TLS encryption parameters + ## ref: https://github.com/bitnami/bitnami-docker-keycloak#tls-encryption + ## + tls: + ## @param auth.tls.enabled Enable TLS encryption + ## + enabled: false + ## @param auth.tls.autoGenerated Automatically generate self-signed TLS certificates. Currently only supports PEM certificates + ## + autoGenerated: false + ## @param auth.tls.existingSecret Existing secret containing the TLS certificates per Keycloak replica + ## Create this secret following the steps below: + ## 1) Generate your truststore and keystore files (more info at https://www.keycloak.org/docs/latest/server_installation/#_setting_up_ssl) + ## 2) Rename your truststore to `keycloak.truststore.jks`. + ## 3) Rename your keystores to `keycloak-X.keystore.jks` where X is the ID of each Keycloak replica + ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-file=./keycloak.truststore.jks --from-file=./keycloak-0.keystore.jks --from-file=./keycloak-1.keystore.jks ... + ## + existingSecret: "" + ## @param auth.tls.usePem Use PEM certificates as input instead of PKCS12/JKS stores + ## If "true", the Keycloak chart will look for the files ca.crt, keycloak-X.key and keycloak-X.crt inside the secret provided with 'existingSecret'. + ## If keystorePassword and truststorePassword are not provided, they will be autogenerated.
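+ ## e.g. a minimal sketch of PEM input (the secret name "keycloak-pem-certs" below is a hypothetical placeholder):
+ ## tls:
+ ##   enabled: true
+ ##   usePem: true
+ ##   existingSecret: keycloak-pem-certs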
+ ## + usePem: false + ## @param auth.tls.truststoreFilename Truststore specific filename inside the existing secret + ## Note: if set, the same truststore file is used in all the replicas + ## + truststoreFilename: "" + ## @param auth.tls.keystoreFilename Keystore specific filename inside the existing secret + ## Note: if set, the same keystore file is used in all the replicas + ## + keystoreFilename: "" + ## @param auth.tls.jksSecret DEPRECATED. Use `auth.tls.existingSecret` instead + ## + jksSecret: "" + ## @param auth.tls.keystorePassword Password to access the keystore when it's password-protected + ## + keystorePassword: "" + ## @param auth.tls.truststorePassword Password to access the truststore when it's password-protected + ## + truststorePassword: "" + ## Init containers' resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param auth.tls.resources.limits The resources limits for the TLS init container + ## @param auth.tls.resources.requests The requested resources for the TLS init container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## + limits: {} + ## Example: + ## requests: + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} +## @param proxyAddressForwarding Enable Proxy Address Forwarding +## ref: https://www.keycloak.org/docs/latest/server_installation/#_setting-up-a-load-balancer-or-proxy +## +proxyAddressForwarding: false +## Keycloak Service Discovery settings +## ref: https://github.com/bitnami/bitnami-docker-keycloak#cluster-configuration +## +serviceDiscovery: + ## @param serviceDiscovery.enabled Enable Service Discovery for Keycloak (required if `replicaCount` > `1`) + ## + enabled: false + ## @param serviceDiscovery.protocol Sets the protocol that Keycloak nodes would use to discover new peers + ## Available protocols can be found at http://www.jgroups.org/javadoc3/org/jgroups/protocols/ + ## + protocol: kubernetes.KUBE_PING + ## @param serviceDiscovery.properties Properties for the discovery protocol set in `serviceDiscovery.protocol` parameter + ## List of key=>value pairs + ## Example: + ## properties: + ## - datasource_jndi_name=>"java:jboss/datasources/KeycloakDS" + ## - initialize_sql=>"CREATE TABLE IF NOT EXISTS JGROUPSPING ( own_addr varchar(200) NOT NULL, cluster_name varchar(200) NOT NULL, created timestamp default current_timestamp, ping_data BYTEA, constraint PK_JGROUPSPING PRIMARY KEY (own_addr, cluster_name))" + ## + properties: [] + ## @param serviceDiscovery.transportStack Transport stack for the discovery protocol set in `serviceDiscovery.protocol` parameter + ## + transportStack: tcp +## Keycloak cache settings +## ref: https://github.com/bitnami/bitnami-docker-keycloak#cluster-configuration +## +cache: + ## @param cache.ownersCount Number of nodes that will replicate cached data + ## + ownersCount: 1 + ## @param cache.authOwnersCount Number of nodes that will replicate cached authentication data + ## + authOwnersCount: 1 +## @param configuration Keycloak Configuration.
Auto-generated based on other parameters when not specified +## Specify content for standalone-ha.xml +## NOTE: This will override configuring Keycloak based on environment variables (including those set by the chart) +## The standalone-ha.xml is auto-generated based on other parameters when this parameter is not specified +## +## Example: +## configuration: |- +## foo: bar +## baz: +## +configuration: "" +## @param existingConfigmap Name of existing ConfigMap with Keycloak configuration +## NOTE: When it's set the configuration parameter is ignored +## +existingConfigmap: "" +## @param extraStartupArgs Extra default startup args +## +extraStartupArgs: "" +## @param initdbScripts Dictionary of initdb scripts +## Specify dictionary of scripts to be run at first boot +## ref: https://github.com/bitnami/bitnami-docker-keycloak#initializing-a-new-instance +## Example: +## initdbScripts: +## my_init_script.sh: | +## #!/bin/bash +## echo "Do something." +## +initdbScripts: {} +## @param initdbScriptsConfigMap ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) +## +initdbScriptsConfigMap: "" +## @param command Override default container command (useful when using custom images) +## +command: [] +## @param args Override default container args (useful when using custom images) +## +args: [] +## @param extraEnvVars Extra environment variables to be set on Keycloak container +## Example: +## extraEnvVars: +## - name: FOO +## value: "bar" +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra env vars +## +extraEnvVarsSecret: "" + +## @section Keycloak statefulset parameters + +## @param replicaCount Number of Keycloak replicas to deploy +## +replicaCount: 1 +## @param containerPorts.http Keycloak HTTP container port +## @param containerPorts.https Keycloak HTTPS container port +## @param containerPorts.management Keycloak management HTTP container port +## +containerPorts: + http: 8080 + https: 8443 + management: 9990 +## Keycloak pods' SecurityContext +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enabled Keycloak pods' Security Context +## @param podSecurityContext.fsGroup Set Keycloak pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroup: 1001 +## Keycloak containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enabled Keycloak containers' Security Context +## @param containerSecurityContext.runAsUser Set Keycloak container's Security Context runAsUser +## @param containerSecurityContext.runAsNonRoot Set Keycloak container's Security Context runAsNonRoot +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true +## Keycloak resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## @param resources.limits The resources limits for the Keycloak containers +## @param resources.requests The requested resources for the Keycloak containers +## +resources: + limits: {} + requests: {} +## Configure extra options for Keycloak containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## 
@param livenessProbe.enabled Enable livenessProbe on Keycloak containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 300 + periodSeconds: 1 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 +## @param readinessProbe.enabled Enable readinessProbe on Keycloak containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 +## When enabling this, make sure to set initialDelaySeconds to 0 for livenessProbe and readinessProbe +## @param startupProbe.enabled Enable startupProbe on Keycloak containers +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 60 + successThreshold: 1 +## @param customLivenessProbe Custom liveness probes for Keycloak +## +customLivenessProbe: {} +## @param customReadinessProbe Custom readiness probes for Keycloak +## +customReadinessProbe: {} +## @param customStartupProbe Custom startup probes for Keycloak +## +customStartupProbe: {} +## @param lifecycleHooks LifecycleHooks to set additional configuration at startup +## +lifecycleHooks: {} +## @param hostAliases Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param podLabels Extra labels for Keycloak pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations Annotations for Keycloak pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set.
Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} +## @param nodeSelector Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: {} +## @param podManagementPolicy Pod management policy for the Keycloak statefulset +## +podManagementPolicy: Parallel +## @param priorityClassName Keycloak pods' Priority Class Name +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param schedulerName Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param terminationGracePeriodSeconds Seconds Keycloak pod needs to terminate gracefully +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +terminationGracePeriodSeconds: "" +## @param updateStrategy.type Keycloak statefulset strategy type +## @param updateStrategy.rollingUpdate Keycloak statefulset rolling update configuration parameters +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + type: RollingUpdate + rollingUpdate: {} +## @param extraVolumes Optionally specify extra list of additional volumes for Keycloak pods +## +extraVolumes: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for Keycloak container(s) +## +extraVolumeMounts: [] +## @param initContainers Add additional init containers to the Keycloak pods +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## @param sidecars Add additional sidecar containers to the Keycloak pods +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] + +## @section Exposure parameters +## + +## Service configuration +## +service: + ## @param service.type Kubernetes service type + ## + type: LoadBalancer + ## @param service.ports.http Keycloak service HTTP port + ## @param service.ports.https Keycloak service HTTPS port + ## + ports: + http: 80 + https: 443 + ## @param service.nodePorts [object] Specify the nodePort values for the LoadBalancer and NodePort service types. 
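+ ## e.g. an illustrative sketch (any ports in the default 30000-32767 NodePort range are valid):
+ ## nodePorts:
+ ##   http: "30080"
+ ##   https: "30443"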
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + http: "" + https: "" + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param service.clusterIP Keycloak service cluster IP + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP loadBalancerIP for the Keycloak service (optional, cloud specific) + ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges Addresses that are allowed when the service type is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for Keycloak service + ## + annotations: {} + ## @param service.extraPorts Extra ports to expose on Keycloak service + ## + extraPorts: [] + +## Keycloak ingress parameters +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress record generation for Keycloak + ## + enabled: false + ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. + ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param ingress.hostname Default host for the ingress record + ## + hostname: keycloak.local + ## @param ingress.path Default path for the ingress record + ## + path: / + ## @param ingress.servicePort Backend service port to use + ## Default is http. Alternative is https. + ## + servicePort: http + ## @param ingress.annotations [object] Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
+ ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record + ## e.g: + ## extraHosts: + ## - name: keycloak.local + ## path: / + ## + extraHosts: [] + ## @param ingress.extraPaths Any additional arbitrary paths that may need to be added to the ingress under the main host. + ## For example: The ALB ingress controller requires a special rule for handling SSL redirection. + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param ingress.extraTls The TLS configuration for additional hostnames to be covered with this ingress record. + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## extraTls: + ## - hosts: + ## - keycloak.local + ## secretName: keycloak.local-tls + ## + extraTls: [] + ## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## - name: keycloak.local-tls + ## key: + ## certificate: + ## + secrets: [] + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable the default NetworkPolicy policy + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports Keycloak is listening + ## on. When true, Keycloak will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.additionalRules Additional NetworkPolicy rules + ## Note that all rules are OR-ed.
+ ## Example: + ## additionalRules: + ## - matchLabels: + ## - role: frontend + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + additionalRules: {} + +## @section RBAC parameter +## Specifies whether a ServiceAccount should be created +## +serviceAccount: + ## @param serviceAccount.create Enable the creation of a ServiceAccount for Keycloak pods + ## + create: true + ## @param serviceAccount.name Name of the created ServiceAccount + ## If not set and create is true, a name is generated using the fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Auto-mount the service account token in the pod + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Specifies whether RBAC resources should be created +## +rbac: + ## @param rbac.create Whether to create and use RBAC resources or not + ## + create: false + ## @param rbac.rules Custom RBAC rules + ## Example: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] + +## @section Other parameters +## + +## Keycloak Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: false + ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + maxUnavailable: "" +## Keycloak Autoscaling configuration +## @param autoscaling.enabled Enable autoscaling for Keycloak +## @param autoscaling.minReplicas Minimum number of Keycloak replicas +## @param autoscaling.maxReplicas Maximum number of Keycloak replicas +## @param autoscaling.targetCPU Target CPU utilization percentage +## @param autoscaling.targetMemory Target Memory utilization percentage +## +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 11 + targetCPU: "" + targetMemory: "" + +## @section Metrics parameters +## + +## Metrics configuration +## +metrics: + ## @param metrics.enabled Enable exposing Keycloak statistics + ## ref: https://github.com/bitnami/bitnami-docker-keycloak#enabling-statistics + ## + enabled: false + ## Keycloak metrics service parameters + ## + service: + ## @param metrics.service.ports.http Metrics service HTTP port + ## + ports: + http: 9990 + ## @param metrics.service.annotations [object] Annotations for enabling prometheus to access the metrics endpoints + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.http }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace which Prometheus is running in + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param 
metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus. + ## + jobLabel: "" + +## @section keycloak-config-cli parameters + +## Configuration for keycloak-config-cli +## ref: https://github.com/adorsys/keycloak-config-cli +## +keycloakConfigCli: + ## @param keycloakConfigCli.enabled Whether to enable keycloak-config-cli job + ## + enabled: false + ## Bitnami keycloak-config-cli image + ## ref: https://hub.docker.com/r/bitnami/keycloak-config-cli/tags/ + ## @param keycloakConfigCli.image.registry keycloak-config-cli container image registry + ## @param keycloakConfigCli.image.repository keycloak-config-cli container image repository + ## @param keycloakConfigCli.image.tag keycloak-config-cli container image tag + ## @param keycloakConfigCli.image.pullPolicy keycloak-config-cli container image pull policy + ## @param keycloakConfigCli.image.pullSecrets keycloak-config-cli container image pull secrets + ## + image: + registry: docker.io + repository: bitnami/keycloak-config-cli + tag: 4.9.0-debian-10-r14 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param keycloakConfigCli.annotations [object] Annotations for keycloak-config-cli job + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: + helm.sh/hook: "post-install,post-upgrade,post-rollback" + helm.sh/hook-delete-policy: "hook-succeeded,before-hook-creation" + helm.sh/hook-weight: "5" + ## @param keycloakConfigCli.command Command for running the container (set to default if not set). Use array form + ## + command: [] + ## @param keycloakConfigCli.args Args for running the container (set to default if not set).
Use array form + ## + args: [] + ## @param keycloakConfigCli.hostAliases Job pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Keycloak config CLI resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param keycloakConfigCli.resources.limits The resources limits for the keycloak-config-cli container + ## @param keycloakConfigCli.resources.requests The requested resources for the keycloak-config-cli container + ## + resources: + limits: {} + requests: {} + ## keycloak-config-cli containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param keycloakConfigCli.containerSecurityContext.enabled Enabled keycloak-config-cli containers' Security Context + ## @param keycloakConfigCli.containerSecurityContext.runAsUser Set keycloak-config-cli container's Security Context runAsUser + ## @param keycloakConfigCli.containerSecurityContext.runAsNonRoot Set keycloak-config-cli container's Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## keycloak-config-cli pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param keycloakConfigCli.podSecurityContext.enabled Enabled keycloak-config-cli pods' Security Context + ## @param keycloakConfigCli.podSecurityContext.fsGroup Set keycloak-config-cli pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## @param keycloakConfigCli.backoffLimit Number of retries before considering a Job as failed + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/job/#pod-backoff-failure-policy + ## + backoffLimit: 1 + ## @param keycloakConfigCli.podLabels Pod extra labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param keycloakConfigCli.podAnnotations Annotations for job pod + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param keycloakConfigCli.extraEnvVars Additional environment variables to set + ## Example: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param keycloakConfigCli.extraEnvVarsCM ConfigMap with extra environment variables + ## + extraEnvVarsCM: "" + ## @param keycloakConfigCli.extraEnvVarsSecret Secret with extra environment variables + ## + extraEnvVarsSecret: "" + ## @param keycloakConfigCli.extraVolumes Extra volumes to add to the job + ## + extraVolumes: [] + ## @param keycloakConfigCli.extraVolumeMounts Extra volume mounts to add to the container + ## + extraVolumeMounts: [] + ## @param keycloakConfigCli.configuration keycloak-config-cli realms configuration + ## NOTE: nil keys will be considered files to import locally + ## Example: + ## configuration: + ## realm1.json: | + ## { + ## "realm": "realm1", + ## "clients": [] + ## } + ## files/realm2.yaml: + ## realm3.yaml: | + ## realm: realm3 + ## clients: [] + ## + configuration: {} + ## @param keycloakConfigCli.existingConfigmap ConfigMap with keycloak-config-cli configuration. 
This will override `keycloakConfigCli.config` + ## NOTE: This will override keycloakConfigCli.configuration + ## + existingConfigmap: "" + +## @section Database parameters + +## PostgreSQL chart configuration +## ref: https://github.com/bitnami/charts/blob/master/bitnami/postgresql/values.yaml +## @param postgresql.enabled Switch to enable or disable the PostgreSQL helm chart +## @param postgresql.auth.username Name for a custom user to create +## @param postgresql.auth.password Password for the custom user to create +## @param postgresql.auth.database Name for a custom database to create +## @param postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials +## @param postgresql.architecture PostgreSQL architecture (`standalone` or `replication`) +## +postgresql: + enabled: true + auth: + username: bn_keycloak + password: "" + database: bitnami_keycloak + existingSecret: "" + architecture: standalone +## External PostgreSQL configuration +## All of these values are only used when postgresql.enabled is set to false +## @param externalDatabase.host Database host +## @param externalDatabase.port Database port number +## @param externalDatabase.user Non-root username for Keycloak +## @param externalDatabase.password Password for the non-root username for Keycloak +## @param externalDatabase.database Keycloak database name +## @param externalDatabase.existingSecret Name of an existing secret resource containing the database credentials +## @param externalDatabase.existingSecretPasswordKey Name of an existing secret key containing the database credentials +## +externalDatabase: + host: "" + port: 5432 + user: bn_keycloak + database: bitnami_keycloak + password: "" + existingSecret: "" + existingSecretPasswordKey: "" diff --git a/ansible/roles/helm_install/files/kubeconfig b/ansible/roles/helm_install/files/kubeconfig new file mode 100644 index 0000000..95b048c --- /dev/null +++ b/ansible/roles/helm_install/files/kubeconfig @@ -0,0 +1,20 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1USXdOakV3TkRZME5Gb1hEVE15TVRJd016RXdORFkwTkZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT0xYCnhYSWpmVWdFdXdxMFZrYXQ0N2d6Q09vRERDL1YvRHl5T0dWZG1PSDEwd1dHZmwrMFI0dkkwaWlHc0ZKRVFqWWMKRGt6THZUOHlRNVpybFpiV3VuVkFYaTlrcHRxY0d2R2NZYXdrMGVIRVdqQ1RRTXo0T2dOd2JuQytKdkd1MnJiVgpaUVVvRzV1cHpDRkM0Q0RtM1h2Ym14RjdYUU11L0FOU045V1UwYS9Td0tvR25EaVMyV1JBOFJpRG9DYURKcGprCjc1azAyMkZOanlOZkxadktmMzFzNTg3OGF1bS9WS0UzeVV4SGhqVzFEeEpTWUMrK1RhV0F0MWpBVXZObWVNQmQKVStmRW0xbUVnNWtnWVBER2d3czBSVHB4WFk1U3dTL3hKVkd2VjRYeExlWEVoWmc2ZlF4c0hvcG1vdVZJOEJLMQpUdW1ram4zTkhrU0dtRVEzczBFQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZNc2RSdjJzQ2RaUW1ZM1dVZ1U4aUlhOHM5cWpNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRXZ5WWorT2QwVSttYkdkSzJyWApTcXdiZjY0M0V4YWlkVDVWQjhPUExoNm1kbVQvTk04cHdYUDg0Y0J2RC9rSzBXTllObkc2RFhTS3BtWjBSM1BmCm03ck8wZ2N4M1dYaDIzWHk5VmJWcGcyK0FXdFRRVDd5dVM3L3U3YjVnV1lQNzVqNk0rOWQvdXNtc3g3alFxNjEKa3NJTzlKNXVmNCtXSHJtQm1WMGNydGZTTjN5bTF2Q2VDZzNNeFpkY3hIUXgrUjNNWFZyYVZFK1NIUE5hdzgzQQppZFI3eDkyenMwY3dRcy9wSGxRK0t5aDFnQ01JQkVFTXM1OERwZU11Q0VUaEZ2elBuL1l6YlVoNUYrcm1KNytoCjJ5VVV0R2x5V3NJQ2l5ODl0Y2pOT1RBN2EvSGJ5ZDk4MmxIM1hLY1ZlUUdYVlJFTFF0UU9sUVNHOVRQU1R4OFcKNWdJPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: 
https://10.10.30.214:6443 + name: kubernetes +contexts: +- context: + cluster: kubernetes + user: kubernetes-admin + name: kubernetes-admin@kubernetes +current-context: kubernetes-admin@kubernetes +kind: Config +preferences: {} +users: +- name: kubernetes-admin + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJYzVmZFNYMzFJTWd3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TWpFeU1EWXhNRFEyTkRSYUZ3MHlNekV5TURZeE1EUTJORGRhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTFBRGVDUGF5elpXTWtpV1AKQzV0RmRzL3orSmpndDYzUzFxMC9TQ2VYTWx0MGRTVGFqb3B5UzRueFVPb1ZXYmY2M016NEQ0RzRDWDNheW1TWgpLa1JmbVJ5SEtNRTVWTll6WUNWSUY1Qm1acEpVUHJ6OU5vZVRocDlQOTV2M3hIWlNBektvKysyZnQ1dzJ1dGJYCnBkN3g0Nk5KSnBPb1I5M2ZxOFFZRkhHOEhhSEd6WjhJbS9CV21KVEJua2R2aGJxczB2R3ZmbkdXRGhFT05wd2IKd2w4Z0kwMFpqTHhBRkZxY1d1MVZtNmJZYXUzVDlrSGdEcEplNUVrSFVFZ2cwWWlOTlZlUzJHdGFaeUpiWUcxZQo4Um9Zd3k0cFlvbzVlMW1sZDJHWE1EUWhkQk1oWGk5R1NsSC9GaWlqN24xTVZYTFpCSk9UL0lSdElQR1dIdmtXCmxVcEQ4UUlEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JUTEhVYjlyQW5XVUptTjFsSUZQSWlHdkxQYQpvekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBTTFLbEwyL3BwU1pCTGpTd3NWZzhGYTlkWk05RDIzNDNJdzY3Ci9QN1JWKzRjSVJqSXlJZFZGMUgrbkl2bFl0cncrMkZDTHpQYWNYcTZUWi9IUktFVEZtb1FYRnhnVk5GRDZVZXAKTW1sd2FCK1BnUmFRWDZoZjJlRUNXaXBobXpLRitsQnRtNENpci9USlZwTitQQ1Jhc0VkVW9pVUVtUHVsRXYvUQp1bDg3Q3RUbVAzajkxRi94eWFrYmhSS2pPbUY0dFRlZ1E4ZUE2SkFYV0d6S09XMzdZR1BHc091ZXlzU3hzanp0CjdpRjczVGVKdmdtbUllWHkrMkdoTFdIQ0tHVGJUek1UZnpYV0J4ODBXbzVjdFNaWmYzT0NJWWRHM1ViY0lsdXMKR0JreXM4b0phSXFHSE93NUpCRE1PNzliajQrbU01MlJGNmtQbkJDL2duckUyQUdHM3c9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMUFEZUNQYXl6WldNa2lXUEM1dEZkcy96K0pqZ3Q2M1MxcTAvU0NlWE1sdDBkU1RhCmpvcHlTNG54VU9vVldiZjYzTXo0RDRHNENYM2F5bVNaS2tSZm1SeUhLTUU1Vk5ZellDVklGNUJtWnBKVVByejkKTm9lVGhwOVA5NXYzeEhaU0F6S28rKzJmdDV3MnV0YlhwZDd4NDZOSkpwT29SOTNmcThRWUZIRzhIYUhHelo4SQptL0JXbUpUQm5rZHZoYnFzMHZHdmZuR1dEaEVPTnB3YndsOGdJMDBaakx4QUZGcWNXdTFWbTZiWWF1M1Q5a0hnCkRwSmU1RWtIVUVnZzBZaU5OVmVTMkd0YVp5SmJZRzFlOFJvWXd5NHBZb281ZTFtbGQyR1hNRFFoZEJNaFhpOUcKU2xIL0ZpaWo3bjFNVlhMWkJKT1QvSVJ0SVBHV0h2a1dsVXBEOFFJREFRQUJBb0lCQUZsT0VEb29hY091WnF1OQp4SmM0RGpmeGU2MVNBUDkrNnB6aUdCRTJGRHZ6U0loOFFORGd3eXJNN2VtTzRmV01TZEd2U2lPR0dsZHRPN2djClRtVCtybUthSU5sckk5SjM5T1pnYmhEM0ZCdkxNay9IWHNjVXIzRjdOTDF5WnhuTVdkbmRBbEExbGgxTFljYXMKNytTQW1OYXlsd0w0R21CRHQ0L3NwOVFjNFFoOXRDQXdTMGUvb1k1cnl4QzFBb25zNUFIMmJGNGg3SGRMM0tvWApHMjRjTm9MY2d0K0J4M00wYSs2aGFoSmV6aGhVL2R5L1dRdjlJay9VRlJra3dBdkxvM3VPUVA1bzB0RVpXcXF1CkhUM3VRLzM4bTBwOGE1U21WR250RTNGWERQRlJGUi9aWXBjME1FMHRPYzZ0U3BmUHQzajRqUDBubjRGMStXdEQKTWhBSlNIMENnWUVBNHNzTTJ3Y1lKaUZhdTFCRzd4cXJkVGMrTWl4L3pWWWZtZHRhemtUcEsvRFF3ZmdRWVREbgpWbWRwNW5MRTdyeTRaMU90RGg3d0pKTVMySHRvRW9sTk9YMjVMKzNBaFgzRFUrU1lNeDJ2VU1jcUVzak1hUHFDCjFvM0dxa2JiNmozS2RZck9jWDhFUXVBWlhmc0RTSWxUOHNxcFdRcGFEZE9RcE15Y05WYzNjTDhDZ1lFQTcwNDUKd1d1dXN2UitGWUgrdW12bU9Ndm9VdmNROXJFcnZGcEJkdndiQnJsWHgvWFBNNE9kYXZXV1hQb3hxbWpUeExzcApnV000V0lVUUN6RjZGb2V5Kzc0T2ZmTmJtZG8renNVSkx5bnBPb3AyWUNUWk8vQ1NCNTFXNHN6UTlQaTErak9xCkdqdjVCK1RqV3o0cEdKWWp2eGpXZ2Nha1FDZEgzRHVTeFlYMngwOENnWUJaT0RRb2ZsUDd2Q2RyaFJ0Q3VTVTIKaWJNSUhnVnhEQzZHWW9zSWxvZDhaOUpZWEhSbEo4MzZhZGg1ZGpFUEVtTWhFd1FEaUJ4RTV5OEV4eGVjSXpPawpLRmVRQ1dJeG9kWVR6TndyVDhSR2JQT2FUREJPSkM4UXBObkE1dnRnM1VvbWo2TERkNHAvbkpXZUtUK1RhNk1BCjRzVllhQUFoYkZkODNabWVTbDlmRlFLQmdRQ3RURlQvQVdCT01FaHVndWxaVDNJMWgxVURYL0JrOWdEYU1mSmUKbkV0bUh5cTJvQWdoSWhzSnJqZnB0VFhxVm1lbGZIU2VRcUEzV29VMzFlaTRFQ1ZKc1dVRlNRcjQ2OWU0SFhCOQpPeml2TUQ1eGViM25ibHdTTDVzUU80ckhIS1dNUDRYYjRicUNRUHQweEJzMnR1UEVLOVNMdnJLTDB1WnpVcUVECmNmUTRlUUtCZ1FDV0xtUEJwUDVMVFA0YmxRVDFMMy9LQ0cweGxzL2pFL1JBVlE5VmY1UVVPeFlUaHBlZTUzdXAKUmwxOEVuSks1THpuNlF2TzRMeXRGdVNLeG5aRnYxY1pycUNKR0owcXhNc3VpTGVtbnYxb2NGMDdiWkYwV0pzWQowak9Ha0oweXZtUlFBVzhQUEhJeGdzN3Y3ekNzZHdTcmx3REEvYnNxa1BUVEJxNlpSUHhSUXc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= + diff --git a/ansible/roles/helm_install/files/mongo-dsk/.helmignore b/ansible/roles/helm_install/files/mongo-dsk/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
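+# e.g. a hypothetical negation pattern: "*.md" would ignore all Markdown files, while a later "!README.md" re-includes README.md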
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/roles/helm_install/files/mongo-dsk/Chart.lock b/ansible/roles/helm_install/files/mongo-dsk/Chart.lock new file mode 100644 index 0000000..b8814b6 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.11.3 +digest: sha256:d5f850d857edd58b32c0e10652f6ec3ce5018def5542f2bcef38fd7fa0079d6b +generated: "2022-03-07T11:59:31.665943918Z" diff --git a/ansible/roles/helm_install/files/mongo-dsk/Chart.yaml b/ansible/roles/helm_install/files/mongo-dsk/Chart.yaml new file mode 100644 index 0000000..8bf0c80 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/Chart.yaml @@ -0,0 +1,30 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 4.4.13 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: MongoDB(R) is an open source NoSQL database that uses JSON for data storage. + MongoDB(TM) Sharded improves scalability and reliability for large datasets by distributing + data across multiple machines. +home: https://github.com/bitnami/charts/tree/master/bitnami/mongodb-sharded +icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png +keywords: +- mongodb +- database +- nosql +- cluster +- replicaset +- replication +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: mongodb-sharded +sources: +- https://github.com/bitnami/bitnami-docker-mongodb-sharded +- https://mongodb.org +version: 4.0.10 diff --git a/ansible/roles/helm_install/files/mongo-dsk/README.md b/ansible/roles/helm_install/files/mongo-dsk/README.md new file mode 100644 index 0000000..37dbc29 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/README.md @@ -0,0 +1,548 @@ + + +# MongoDB(R) Sharded packaged by Bitnami + +MongoDB(R) is an open source NoSQL database that uses JSON for data storage. MongoDB(TM) Sharded improves scalability and reliability for large datasets by distributing data across multiple machines. + +[Overview of MongoDB® Sharded](http://www.mongodb.org) + +Disclaimer: The respective trademarks mentioned in the offering are owned by the respective companies. We do not provide a commercial license for any of these products. This listing has an open-source license. MongoDB(R) is run and maintained by MongoDB, which is a completely separate project from Bitnami. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/mongodb-sharded +``` + +## Introduction + +This chart bootstraps a [MongoDB(®) Sharded](https://github.com/bitnami/bitnami-docker-mongodb-sharded) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Classified as a NoSQL database, MongoDB® eschews the traditional table-based relational database structure in favor of JSON-like documents with dynamic schemas, making the integration of data in certain types of applications easier and faster. + +This chart uses the [sharding method](https://docs.mongodb.com/manual/sharding/) for distributing data across multiple machines. This is meant for deployments with very large data sets and high throughput operations. 
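+
+For example, the number of shards can be overridden at install time through the `shards` parameter documented below. A minimal sketch (the release name is illustrative) that deploys three shards instead of the default two:
+
+```bash
+$ helm install my-release bitnami/mongodb-sharded --set shards=3
+```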
+ +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure +- ReadWriteMany volumes for deployment scaling + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/mongodb-sharded +``` + +The command deploys MongoDB® on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global storage class for dynamic provisioning | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `nameOverride` | String to partially override mongodb.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override mongodb.fullname template | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + + +### MongoDB(®) Sharded parameters + +| Name | Description | Value | +| ------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `image.registry` | MongoDB(®) Sharded image registry | `docker.io` | +| `image.repository` | MongoDB(®) Sharded Image name | `bitnami/mongodb-sharded` | +| `image.tag` | MongoDB(®) Sharded image tag (immutable tags are recommended) | `4.4.11-debian-10-r6` | +| `image.pullPolicy` | MongoDB(®) Sharded image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `mongodbRootPassword` | MongoDB® root password | `""` | +| `replicaSetKey` | Replica Set key (shared for shards and config servers) | `""` | +| `existingSecret` | Existing secret with MongoDB® credentials | `""` | +| `usePasswordFile` | Mount credentials as files instead of using environment variables | `false` | +| `shards` | Number of shards to be created | `2` | +| `common.mongodbEnableNumactl` | Enable launch MongoDB instance 
prefixed with "numactl --interleave=all" | `false` | +| `common.useHostnames` | Enable DNS hostnames in the replica set config | `true` | +| `common.mongodbEnableIPv6` | Switch to enable/disable IPv6 on MongoDB® | `false` | +| `common.mongodbDirectoryPerDB` | Switch to enable/disable DirectoryPerDB on MongoDB® | `false` | +| `common.mongodbSystemLogVerbosity` | MongoDB® system log verbosity level | `0` | +| `common.mongodbDisableSystemLog` | Whether to disable MongoDB® system log or not | `false` | +| `common.mongodbMaxWaitTimeout` | Maximum time (in seconds) for MongoDB® nodes to wait for another MongoDB® node to be ready | `120` | +| `common.initScriptsCM` | Configmap with init scripts to execute | `""` | +| `common.initScriptsSecret` | Secret with init scripts to execute (for sensitive data) | `""` | +| `common.extraEnvVars` | An array to add extra env vars | `[]` | +| `common.extraEnvVarsCM` | Name of a ConfigMap containing extra env vars | `""` | +| `common.extraEnvVarsSecret` | Name of a Secret containing extra env vars | `""` | +| `common.sidecars` | Add sidecars to the pod | `[]` | +| `common.initContainers` | Add init containers to the pod | `[]` | +| `common.podAnnotations` | Additional pod annotations | `{}` | +| `common.podLabels` | Additional pod labels | `{}` | +| `common.extraVolumes` | Array to add extra volumes | `[]` | +| `common.extraVolumeMounts` | Array to add extra mounts (normally used with extraVolumes) | `[]` | +| `common.containerPorts.mongo` | MongoDB container port | `27017` | +| `common.serviceAccount.create` | Whether to create a Service Account for all pods automatically | `false` | +| `common.serviceAccount.name` | Name of a Service Account to be used by all Pods | `""` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r308` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources` | Init container resource requests/limit | `{}` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `securityContext.runAsNonRoot` | Run containers as non-root users | `true` | +| `service.name` | Specify an explicit service name | `""` | +| `service.annotations` | Additional service annotations (evaluate as a template) | `{}` | +| `service.type` | Service type | `ClusterIP` | +| `service.externalTrafficPolicy` | External traffic policy | `Cluster` | +| `service.port` | MongoDB® service port | `27017` | +| `service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `service.nodePort` | Specify the nodePort value for the LoadBalancer and NodePort service types. 
| `""` | +| `service.externalIPs` | External IP list to use with ClusterIP service type | `[]` | +| `service.loadBalancerIP` | Static IP Address to use for LoadBalancer service type | `""` | +| `service.loadBalancerSourceRanges` | List of IP ranges allowed access to load balancer (if supported) | `[]` | +| `service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `60` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `60` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | + + +### Config Server parameters + +| Name | Description | Value | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------- | ------------------- | +| `configsvr.replicas` | Number of nodes in the replica set (the first node will be primary) | `1` | +| `configsvr.resources` | Configure pod resources | `{}` | +| `configsvr.hostAliases` | Deployment pod host aliases | `[]` | +| `configsvr.mongodbExtraFlags` | MongoDB® additional command line flags | `[]` | +| `configsvr.priorityClassName` | Pod priority class name | `""` | +| `configsvr.podAffinityPreset` | Config Server Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `configsvr.podAntiAffinityPreset` | Config Server Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `configsvr.nodeAffinityPreset.type` | Config Server Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `configsvr.nodeAffinityPreset.key` | Config Server Node label key to match Ignored if `affinity` is set. | `""` | +| `configsvr.nodeAffinityPreset.values` | Config Server Node label values to match. Ignored if `affinity` is set. 
| `[]` | +| `configsvr.affinity` | Config Server Affinity for pod assignment | `{}` | +| `configsvr.nodeSelector` | Config Server Node labels for pod assignment | `{}` | +| `configsvr.tolerations` | Config Server Tolerations for pod assignment | `[]` | +| `configsvr.podManagementPolicy` | Statefulset's pod management policy, allows parallel startup of pods | `OrderedReady` | +| `configsvr.updateStrategy.type` | updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets | `RollingUpdate` | +| `configsvr.config` | MongoDB® configuration file | `""` | +| `configsvr.configCM` | ConfigMap name with Config Server configuration file (cannot be used with configsvr.config) | `""` | +| `configsvr.extraEnvVars` | An array to add extra env vars | `[]` | +| `configsvr.extraEnvVarsCM` | Name of a ConfigMap containing extra env vars | `""` | +| `configsvr.extraEnvVarsSecret` | Name of a Secret containing extra env vars | `""` | +| `configsvr.sidecars` | Add sidecars to the pod | `[]` | +| `configsvr.initContainers` | Add init containers to the pod | `[]` | +| `configsvr.podAnnotations` | Additional pod annotations | `{}` | +| `configsvr.podLabels` | Additional pod labels | `{}` | +| `configsvr.extraVolumes` | Array to add extra volumes. Requires setting `extraVolumeMounts` | `[]` | +| `configsvr.extraVolumeMounts` | Array to add extra mounts (normally used with extraVolumes). Normally used with `extraVolumes` | `[]` | +| `configsvr.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `configsvr.pdb.enabled` | Enable pod disruption budget | `false` | +| `configsvr.pdb.minAvailable` | Minimum number of available config pods allowed (`0` to disable) | `0` | +| `configsvr.pdb.maxUnavailable` | Maximum number of unavailable config pods allowed (`0` to disable) | `1` | +| `configsvr.persistence.enabled` | Use a PVC to persist data | `true` | +| `configsvr.persistence.mountPath` | Path to mount the volume at | `/bitnami/mongodb` | +| `configsvr.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `configsvr.persistence.storageClass` | Storage class of backing PVC | `""` | +| `configsvr.persistence.accessModes` | Use volume as ReadOnly or ReadWrite | `["ReadWriteOnce"]` | +| `configsvr.persistence.size` | PersistentVolumeClaim size | `8Gi` | +| `configsvr.persistence.annotations` | Persistent Volume annotations | `{}` | +| `configsvr.serviceAccount.create` | Specifies whether a ServiceAccount should be created for Config Server | `false` | +| `configsvr.serviceAccount.name` | Name of a Service Account to be used by Config Server | `""` | +| `configsvr.external.host` | Primary node of an external Config Server replicaset | `""` | +| `configsvr.external.rootPassword` | Root password of the external Config Server replicaset | `""` | +| `configsvr.external.replicasetName` | Replicaset name of an external Config Server | `""` | +| `configsvr.external.replicasetKey` | Replicaset key of an external Config Server | `""` | + + +### Mongos parameters + +| Name | Description | Value | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------ | --------------- | +| `mongos.replicas` | Number of replicas | `1` | +| `mongos.resources` | Configure pod resources | `{}` | +| `mongos.hostAliases` | Deployment pod host aliases | `[]` | +| `mongos.mongodbExtraFlags` | MongoDB® additional command line flags | `[]` | +| `mongos.priorityClassName` | Pod priority class name | `""` | +| 
`mongos.podAffinityPreset` | Mongos Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `mongos.podAntiAffinityPreset` | Mongos Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `mongos.nodeAffinityPreset.type` | Mongos Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `mongos.nodeAffinityPreset.key` | Mongos Node label key to match Ignored if `affinity` is set. | `""` | +| `mongos.nodeAffinityPreset.values` | Mongos Node label values to match. Ignored if `affinity` is set. | `[]` | +| `mongos.affinity` | Mongos Affinity for pod assignment | `{}` | +| `mongos.nodeSelector` | Mongos Node labels for pod assignment | `{}` | +| `mongos.tolerations` | Mongos Tolerations for pod assignment | `[]` | +| `mongos.podManagementPolicy` | Statefulsets pod management policy, allows parallel startup of pods | `OrderedReady` | +| `mongos.updateStrategy.type` | updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets | `RollingUpdate` | +| `mongos.config` | MongoDB® configuration file | `""` | +| `mongos.configCM` | ConfigMap name with MongoDB® configuration file (cannot be used with mongos.config) | `""` | +| `mongos.extraEnvVars` | An array to add extra env vars | `[]` | +| `mongos.extraEnvVarsCM` | Name of a ConfigMap containing extra env vars | `""` | +| `mongos.extraEnvVarsSecret` | Name of a Secret containing extra env vars | `""` | +| `mongos.sidecars` | Add sidecars to the pod | `[]` | +| `mongos.initContainers` | Add init containers to the pod | `[]` | +| `mongos.podAnnotations` | Additional pod annotations | `{}` | +| `mongos.podLabels` | Additional pod labels | `{}` | +| `mongos.extraVolumes` | Array to add extra volumes. Requires setting `extraVolumeMounts` | `[]` | +| `mongos.extraVolumeMounts` | Array to add extra volume mounts. Normally used with `extraVolumes`. | `[]` | +| `mongos.schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `mongos.useStatefulSet` | Use StatefulSet instead of Deployment | `false` | +| `mongos.servicePerReplica.enabled` | Create one service per mongos replica (must be used with statefulset) | `false` | +| `mongos.servicePerReplica.annotations` | Additional service annotations (evaluate as a template) | `{}` | +| `mongos.servicePerReplica.type` | Service type | `ClusterIP` | +| `mongos.servicePerReplica.externalTrafficPolicy` | External traffic policy | `Cluster` | +| `mongos.servicePerReplica.port` | MongoDB® service port | `27017` | +| `mongos.servicePerReplica.clusterIP` | Static clusterIP or None for headless services | `""` | +| `mongos.servicePerReplica.nodePort` | Specify the nodePort value for the LoadBalancer and NodePort service types | `""` | +| `mongos.servicePerReplica.externalIPs` | External IP list to use with ClusterIP service type | `[]` | +| `mongos.servicePerReplica.loadBalancerIP` | Static IP Address to use for LoadBalancer service type | `""` | +| `mongos.servicePerReplica.loadBalancerSourceRanges` | List of IP ranges allowed access to load balancer (if supported) | `[]` | +| `mongos.servicePerReplica.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `mongos.servicePerReplica.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `mongos.pdb.enabled` | Enable pod disruption budget | `false` | +| `mongos.pdb.minAvailable` | Minimum number of available mongo pods allowed (`0` to disable) | `0` | +| `mongos.pdb.maxUnavailable` | Maximum number of unavailable mongo pods allowed (`0` to disable) | `1` | +| `mongos.serviceAccount.create` | Whether to create a Service Account for mongos automatically | `false` | +| `mongos.serviceAccount.name` | Name of a Service Account to be used by mongos | `""` | + + +### Shard configuration: Data node parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------------------------------------- | --------------- | +| `shardsvr.dataNode.replicas` | Number of nodes in each shard replica set (the first node will be primary) | `1` | +| `shardsvr.dataNode.resources` | Configure pod resources | `{}` | +| `shardsvr.dataNode.mongodbExtraFlags` | MongoDB® additional command line flags | `[]` | +| `shardsvr.dataNode.priorityClassName` | Pod priority class name | `""` | +| `shardsvr.dataNode.podAffinityPreset` | Data nodes Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `shardsvr.dataNode.podAntiAffinityPreset` | Data nodes Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `shardsvr.dataNode.nodeAffinityPreset.type` | Data nodes Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `shardsvr.dataNode.nodeAffinityPreset.key` | Data nodes Node label key to match Ignored if `affinity` is set. | `""` | +| `shardsvr.dataNode.nodeAffinityPreset.values` | Data nodes Node label values to match. Ignored if `affinity` is set. 
| `[]` | +| `shardsvr.dataNode.affinity` | Data nodes Affinity for pod assignment | `{}` | +| `shardsvr.dataNode.nodeSelector` | Data nodes Node labels for pod assignment | `{}` | +| `shardsvr.dataNode.tolerations` | Data nodes Tolerations for pod assignment | `[]` | +| `shardsvr.dataNode.podManagementPolicy` | podManagementPolicy for the statefulset, allows parallel startup of pods | `OrderedReady` | +| `shardsvr.dataNode.updateStrategy.type` | updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets | `RollingUpdate` | +| `shardsvr.dataNode.hostAliases` | Deployment pod host aliases | `[]` | +| `shardsvr.dataNode.config` | Entries for the MongoDB® config file | `""` | +| `shardsvr.dataNode.configCM` | ConfigMap name with MongoDB® configuration (cannot be used with shardsvr.dataNode.config) | `""` | +| `shardsvr.dataNode.extraEnvVars` | An array to add extra env vars | `[]` | +| `shardsvr.dataNode.extraEnvVarsCM` | Name of a ConfigMap containing extra env vars | `""` | +| `shardsvr.dataNode.extraEnvVarsSecret` | Name of a Secret containing extra env vars | `""` | +| `shardsvr.dataNode.sidecars` | Attach additional containers (evaluated as a template) | `[]` | +| `shardsvr.dataNode.initContainers` | Add init containers to the pod | `[]` | +| `shardsvr.dataNode.podAnnotations` | Additional pod annotations | `{}` | +| `shardsvr.dataNode.podLabels` | Additional pod labels | `{}` | +| `shardsvr.dataNode.extraVolumes` | Array to add extra volumes. Requires setting `extraVolumeMounts` | `[]` | +| `shardsvr.dataNode.extraVolumeMounts` | Array to add extra mounts. Normally used with `extraVolumes` | `[]` | +| `shardsvr.dataNode.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `shardsvr.dataNode.pdb.enabled` | Enable pod disruption budget | `false` | +| `shardsvr.dataNode.pdb.minAvailable` | Minimum number of available data pods allowed (`0` to disable) | `0` | +| `shardsvr.dataNode.pdb.maxUnavailable` | Maximum number of unavailable data pods allowed (`0` to disable) | `1` | +| `shardsvr.dataNode.serviceAccount.create` | Specifies whether a ServiceAccount should be created for shardsvr | `false` | +| `shardsvr.dataNode.serviceAccount.name` | Name of a Service Account to be used by shardsvr data pods | `""` | + + +### Shard configuration: Persistence parameters + +| Name | Description | Value | +| ----------------------------------- | ---------------------------------------------------------------------------------------- | ------------------- | +| `shardsvr.persistence.enabled` | Use a PVC to persist data | `true` | +| `shardsvr.persistence.mountPath` | The path the volume will be mounted at, useful when using different MongoDB® images. 
| `/bitnami/mongodb` | +| `shardsvr.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `shardsvr.persistence.storageClass` | Storage class of backing PVC | `""` | +| `shardsvr.persistence.accessModes` | Use volume as ReadOnly or ReadWrite | `["ReadWriteOnce"]` | +| `shardsvr.persistence.size` | PersistentVolumeClaim size | `8Gi` | +| `shardsvr.persistence.annotations` | Additional volume annotations | `{}` | + + +### Shard configuration: Arbiter parameters + +| Name | Description | Value | +| -------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------- | +| `shardsvr.arbiter.replicas` | Number of arbiters in each shard replica set (the first node will be primary) | `0` | +| `shardsvr.arbiter.hostAliases` | Deployment pod host aliases | `[]` | +| `shardsvr.arbiter.resources` | Configure pod resources | `{}` | +| `shardsvr.arbiter.mongodbExtraFlags` | MongoDB® additional command line flags | `[]` | +| `shardsvr.arbiter.priorityClassName` | Pod priority class name | `""` | +| `shardsvr.arbiter.podAffinityPreset` | Arbiter's Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `shardsvr.arbiter.podAntiAffinityPreset` | Arbiter's Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `shardsvr.arbiter.nodeAffinityPreset.type` | Arbiter's Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `shardsvr.arbiter.nodeAffinityPreset.key` | Arbiter's Node label key to match Ignored if `affinity` is set. | `""` | +| `shardsvr.arbiter.nodeAffinityPreset.values` | Arbiter's Node label values to match. Ignored if `affinity` is set. | `[]` | +| `shardsvr.arbiter.affinity` | Arbiter's Affinity for pod assignment | `{}` | +| `shardsvr.arbiter.nodeSelector` | Arbiter's Node labels for pod assignment | `{}` | +| `shardsvr.arbiter.tolerations` | Arbiter's Tolerations for pod assignment | `[]` | +| `shardsvr.arbiter.podManagementPolicy` | Statefulset's pod management policy, allows parallel startup of pods | `OrderedReady` | +| `shardsvr.arbiter.updateStrategy.type` | updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets | `RollingUpdate` | +| `shardsvr.arbiter.config` | MongoDB® configuration file | `""` | +| `shardsvr.arbiter.configCM` | ConfigMap name with MongoDB® configuration file (cannot be used with shardsvr.arbiter.config) | `""` | +| `shardsvr.arbiter.extraEnvVars` | An array to add extra env vars | `[]` | +| `shardsvr.arbiter.extraEnvVarsCM` | Name of a ConfigMap containing extra env vars | `""` | +| `shardsvr.arbiter.extraEnvVarsSecret` | Name of a Secret containing extra env vars | `""` | +| `shardsvr.arbiter.sidecars` | Add sidecars to the pod | `[]` | +| `shardsvr.arbiter.initContainers` | Add init containers to the pod | `[]` | +| `shardsvr.arbiter.podAnnotations` | Additional pod annotations | `{}` | +| `shardsvr.arbiter.podLabels` | Additional pod labels | `{}` | +| `shardsvr.arbiter.extraVolumes` | Array to add extra volumes | `[]` | +| `shardsvr.arbiter.extraVolumeMounts` | Array to add extra mounts (normally used with extraVolumes) | `[]` | +| `shardsvr.arbiter.schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `shardsvr.arbiter.serviceAccount.create` | Specifies whether a ServiceAccount should be created for shardsvr arbiter nodes | `false` | +| `shardsvr.arbiter.serviceAccount.name` | Name of a Service Account to be used by shardsvr arbiter pods | `""` | + + +### Metrics parameters + +| Name | Description | Value | +| -------------------------------------------- | ---------------------------------------------------------------------------------- | -------------------------- | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | MongoDB® exporter image registry | `docker.io` | +| `metrics.image.repository` | MongoDB® exporter image name | `bitnami/mongodb-exporter` | +| `metrics.image.tag` | MongoDB® exporter image tag | `0.30.0-debian-10-r53` | +| `metrics.image.pullPolicy` | MongoDB® exporter image pull policy | `Always` | +| `metrics.image.pullSecrets` | MongoDB® exporter image pull secrets | `[]` | +| `metrics.useTLS` | Whether to connect to MongoDB® with TLS | `false` | +| `metrics.extraArgs` | String with extra arguments to the metrics exporter | `""` | +| `metrics.resources` | Metrics exporter resource requests and limits | `{}` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe | `false` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `15` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe | `false` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.containerPort` | Port of the Prometheus metrics container | `9216` | +| `metrics.podAnnotations` | Metrics exporter pod Annotation | `{}` | +| `metrics.podMonitor.enabled` | Create PodMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.podMonitor.namespace` | Namespace where podmonitor resource should be created | `monitoring` | +| `metrics.podMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.podMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.podMonitor.additionalLabels` | Additional labels that can be used so PodMonitors will be discovered by Prometheus | `{}` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set shards=4,configsvr.replicas=3,shardsvr.dataNode.replicas=2 \ + bitnami/mongodb-sharded +``` + +The above command sets the number of shards to 4, the number of replicas for the config servers to 3 and number of replicas for data nodes to 2. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example,
+
+```bash
+$ helm install my-release -f values.yaml bitnami/mongodb-sharded
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Change MongoDB® version
+
+To modify the MongoDB® version used in this chart, specify a [valid image tag](https://hub.docker.com/r/bitnami/mongodb-sharded/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images, like the exporters.
+
+### Sharding
+
+This chart deploys a sharded cluster by default. Some characteristics of this chart are:
+
+- It allows HA by enabling replication on the shards and the config servers. The mongos instances can be scaled horizontally as well.
+- The number of secondary and arbiter nodes can be scaled out independently.
+
+### Initialize a fresh instance
+
+The [Bitnami MongoDB®](https://github.com/bitnami/bitnami-docker-mongodb-sharded) image allows you to use your custom scripts to initialize a fresh instance. You can create a custom ConfigMap and provide it via `initScriptsCM` (check the parameters above for more details).
+
+The allowed extensions are `.sh` and `.js`.
+
+### Sidecars and Init Containers
+
+If you need additional containers to run within the same pod as the MongoDB® containers (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter (available in the `mongos`, `shardsvr.dataNode`, `shardsvr.arbiter`, `configsvr` and `common` sections). Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+- name: your-image-name
+  image: your-image
+  imagePullPolicy: Always
+  ports:
+  - name: portname
+    containerPort: 1234
+```
+
+Similarly, you can add extra init containers using the `initContainers` parameter.
+
+```yaml
+initContainers:
+- name: your-image-name
+  image: your-image
+  imagePullPolicy: Always
+  ports:
+  - name: portname
+    containerPort: 1234
+```
+
+### Adding extra environment variables
+
+In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property (available in the `mongos`, `shardsvr.dataNode`, `shardsvr.arbiter`, `configsvr` and `common` sections). Note that environment variable values must be strings:
+
+```yaml
+extraEnvVars:
+  - name: MONGODB_VERSION
+    value: "4.0"
+```
+
+Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values.
+
+### Using an external config server
+
+It is possible to deploy no shards and no config server at all. For example, you can simply deploy `mongos` instances that point to an external MongoDB® sharded database. In that case, set `configsvr.external.host` and `configsvr.external.replicasetName` for the mongos instances to connect to. For authentication, set the `configsvr.external.rootPassword` and `configsvr.external.replicasetKey` values.
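+
+A minimal `values.yaml` sketch for this scenario could look as follows (the hostname, replica set names and credentials are placeholders, and it assumes that shard deployment can be disabled by setting `shards` to `0`):
+
+```yaml
+shards: 0
+configsvr:
+  external:
+    host: cfg-0.external-mongodb.example.com
+    replicasetName: cfgReplicaSet
+    rootPassword: myRootPassword
+    replicasetKey: myReplicasetKey
+```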
+
+## Persistence
+
+The [Bitnami MongoDB®](https://github.com/bitnami/bitnami-docker-mongodb-sharded) image stores the MongoDB® data and configurations at the `/bitnami/mongodb` path of the container.
+
+The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data to it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+### Adding extra volumes
+
+This chart supports mounting extra volumes (either PVCs, Secrets or ConfigMaps) by using the `extraVolumes` and `extraVolumeMounts` properties (available in the `mongos`, `shardsvr.dataNode`, `shardsvr.arbiter`, `configsvr` and `common` sections). This can be combined with advanced operations like adding extra init containers and sidecars.
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+If authentication is enabled, it is necessary to set the `mongodbRootPassword` and `replicaSetKey` when upgrading for the readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use. Please note down the password, and run the command below to upgrade your chart:
+
+```bash
+$ helm upgrade my-release bitnami/mongodb-sharded --set mongodbRootPassword=[PASSWORD] (--set replicaSetKey=[REPLICASETKEY])
+```
+
+> Note: you need to substitute the placeholders [PASSWORD] and [REPLICASETKEY] with the values obtained in the installation notes.
+
+### To 4.0.0
+
+In this version, the mongodb-exporter bundled as part of this Helm chart was updated to a new version which, even though it is not a major change, can contain breaking changes (from `0.11.X` to `0.30.X`).
+Please visit the release notes of the upstream project at https://github.com/percona/mongodb_exporter/releases
+
+### To 3.1.0
+
+This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
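+
+As a sketch (assuming the chart source has been fetched to a local `./mongodb-sharded` directory), the dependency refresh before the upgrade would look like this:
+
+```bash
+$ helm dependency update ./mongodb-sharded
+```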
+
+### To 3.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues.
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore.
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3.
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+### To 2.0.0
+
+MongoDB® container images were updated to `4.4.x`, which can affect compatibility with older versions of MongoDB®. Refer to the following guide to upgrade your applications:
+
+- [Upgrade a Sharded Cluster to 4.4](https://docs.mongodb.com/manual/release-notes/4.4-upgrade-sharded-cluster/)
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/.helmignore b/ansible/roles/helm_install/files/mongo-dsk/charts/common/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/Chart.yaml b/ansible/roles/helm_install/files/mongo-dsk/charts/common/Chart.yaml
new file mode 100644
index 0000000..3f32f99
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+  category: Infrastructure
+apiVersion: v2
+appVersion: 1.11.3
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/master/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+name: common
+sources:
+- https://github.com/bitnami/charts
+- https://www.bitnami.com/
+type: library
+version: 1.11.3
diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/README.md b/ansible/roles/helm_install/files/mongo-dsk/charts/common/README.md
new file mode 100644
index 0000000..8dc47f0
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/README.md
@@ -0,0 +1,345 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 0.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+The following tables list the helpers available in the library, scoped in different sections.
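+
+Each helper is consumed with `include` plus the expected input described below. As an illustrative sketch (the `mongodb` component name is a placeholder), a consuming chart might render a soft pod anti-affinity like this:
+
+```yaml
+# templates/deployment.yaml (sketch)
+spec:
+  affinity:
+    podAntiAffinity:
+      {{- include "common.affinities.pods" (dict "type" "soft" "component" "mongodb" "context" $) | nindent 6 }}
+```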
+
+### Affinities
+
+| Helper identifier              | Description                                          | Expected Input                                 |
+|--------------------------------|------------------------------------------------------|------------------------------------------------|
+| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition                | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition                | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.pods.soft`  | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $`           |
+| `common.affinities.pods.hard`  | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $`           |
+
+### Capabilities
+
+| Helper identifier                               | Description                                                                                      | Expected Input    |
+|-------------------------------------------------|--------------------------------------------------------------------------------------------------|-------------------|
+| `common.capabilities.kubeVersion`               | Return the target Kubernetes version (using the client default if .Values.kubeVersion is not set). | `.` Chart context |
+| `common.capabilities.cronjob.apiVersion`        | Return the appropriate apiVersion for cronjob.                                                   | `.` Chart context |
+| `common.capabilities.deployment.apiVersion`     | Return the appropriate apiVersion for deployment.                                                | `.` Chart context |
+| `common.capabilities.statefulset.apiVersion`    | Return the appropriate apiVersion for statefulset.                                               | `.` Chart context |
+| `common.capabilities.ingress.apiVersion`        | Return the appropriate apiVersion for ingress.                                                   | `.` Chart context |
+| `common.capabilities.rbac.apiVersion`           | Return the appropriate apiVersion for RBAC resources.                                            | `.` Chart context |
+| `common.capabilities.crd.apiVersion`            | Return the appropriate apiVersion for CRDs.                                                      | `.` Chart context |
+| `common.capabilities.policy.apiVersion`         | Return the appropriate apiVersion for poddisruptionbudget.                                       | `.` Chart context |
+| `common.capabilities.networkPolicy.apiVersion`  | Return the appropriate apiVersion for networkpolicy.                                             | `.` Chart context |
+| `common.capabilities.supportsHelmVersion`       | Returns true if the used Helm version is 3.3+                                                    | `.` Chart context |
+
+### Errors
+
+| Helper identifier                        | Description                                                                                                                                                            | Expected Input                                                                     |
+|------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------|
+| `common.errors.upgrade.passwords.empty`  | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` |
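+
+For instance, the errors helper is typically combined with the validations helpers described below; a sketch (the secret name, value key and field are placeholders):
+
+```console
+{{- $passwordError := include "common.validations.values.single.empty" (dict "valueKey" "mongodbRootPassword" "secret" "my-release-secret" "field" "mongodb-root-password" "context" $) -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $passwordError) "context" $) }}
+```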
+
+### Images
+
+| Helper identifier                  | Description                                                                                                        | Expected Input                                                                                            |
+|------------------------------------|--------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------|
+| `common.images.image`              | Return the proper and full image name                                                                              | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure.    |
+| `common.images.pullSecrets`        | Return the proper Docker Image Registry Secret Names (deprecated: use `common.images.renderPullSecrets` instead)   | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global`      |
+| `common.images.renderPullSecrets`  | Return the proper Docker Image Registry Secret Names (evaluates values as templates)                               | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $`                  |
+
+### Ingress
+
+| Helper identifier                          | Description                                                                                                         | Expected Input                                                                                                                                                                    |
+|---------------------------------------------|-----------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.ingress.backend`                   | Generate a proper Ingress backend entry depending on the API version                                               | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences |
+| `common.ingress.supportsPathType`          | Prints "true" if the pathType field is supported                                                                    | `.` Chart context                                                                                                                                                                 |
+| `common.ingress.supportsIngressClassname`  | Prints "true" if the ingressClassname field is supported                                                            | `.` Chart context                                                                                                                                                                 |
+| `common.ingress.certManagerRequest`        | Prints "true" if the required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations`                                                                                                                      |
+
+### Labels
+
+| Helper identifier           | Description                                                                  | Expected Input    |
+|-----------------------------|------------------------------------------------------------------------------|-------------------|
+| `common.labels.standard`    | Return Kubernetes standard labels                                            | `.` Chart context |
+| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector`  | `.` Chart context |
+
+### Names
+
+| Helper identifier       | Description                                                 | Expected Input    |
+|-------------------------|--------------------------------------------------------------|-------------------|
+| `common.names.name`     | Expand the name of the chart or use `.Values.nameOverride`  | `.` Chart context |
+| `common.names.fullname` | Create a default fully qualified app name.                  | `.` Chart context |
+| `common.names.chart`    | Chart name plus version                                     | `.` Chart context |
+
+### Secrets
+
+| Helper identifier         | Description                                                      | Expected Input                                                                                                                                                                                                                      |
+|---------------------------|--------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.secrets.name`     | Generate the name of the secret.                                 | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $`, see [ExistingSecret](#existingsecret) for the structure.                                                                     |
+| `common.secrets.key`      | Generate a secret key.                                           | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"`, see [ExistingSecret](#existingsecret) for the structure.                                                                                                |
+| `common.passwords.manage` | Generate a secret password or retrieve one if already created.   | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`; the length, strong and chartName fields are optional. |
+| `common.secrets.exists`   | Returns whether a previously generated secret already exists.    | `dict "secret" "secret-name" "context" $`                                                                                                                                                                                           |
+
+### Storage
+
+| Helper identifier      | Description                      | Expected Input                                                                                                        |
+|------------------------|------------------------------------|------------------------------------------------------------------------------------------------------------------------|
+| `common.storage.class` | Return the proper Storage Class  | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure.  |
+
+### TplValues
+
+| Helper identifier         | Description                              | Expected Input                                                                                                                                               |
+|---------------------------|--------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`, where value is the value that should be rendered as a template and context is frequently the chart context `$` or `.` |
+
+### Utils
+
+| Helper identifier              | Description                                                                                             | Expected Input                                                          |
+|--------------------------------|-----------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------|
+| `common.utils.fieldToEnvVar`   | Build an environment variable name given a field.                                                      | `dict "field" "my-password"`                                            |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value.                                                              | `dict "secret" "secret-name" "field" "secret-value-field" "context" $`  |
+| `common.utils.getValueFromKey` | Gets a value from the `.Values` object given its key path                                               | `dict "key" "path.to.key" "context" $`                                  |
+| `common.utils.getKeyFromList`  | Returns the first `.Values` key with a defined value, or the first key of the list if none are defined  | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $`          |
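+
+As a small usage sketch (the secret name is a placeholder, and the rendered name `MY_PASSWORD` is assumed from the helper's description):
+
+```yaml
+env:
+  - name: {{ include "common.utils.fieldToEnvVar" (dict "field" "my-password") }} # assumed to render MY_PASSWORD
+    valueFrom:
+      secretKeyRef:
+        name: my-secret
+        key: my-password
+```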
+
+### Validations
+
+| Helper identifier                                | Description                                                                                                              | Expected Input                                                                                                                                                                                                                             |
+|---------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.validations.values.single.empty`         | Validate that a value must not be empty.                                                                                 | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $`; secret, field and subchart are optional. In case they are given, the helper will generate a how-to-get instruction. See [ValidateValue](#validatevalue) |
+| `common.validations.values.multiple.empty`       | Validate that multiple values are not empty. It returns a shared error for all the values.                               | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue)                                                                                                                        |
+| `common.validations.values.mariadb.passwords`    | This helper will ensure the required passwords for MariaDB are not empty. It returns a shared error for all the values.  | `dict "secret" "mariadb-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where the mariadb chart and the helper are used.                                                     |
+| `common.validations.values.postgresql.passwords` | This helper will ensure the required passwords for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where the postgresql chart and the helper are used.                                              |
+| `common.validations.values.redis.passwords`      | This helper will ensure the required passwords for Redis™ are not empty. It returns a shared error for all the values.   | `dict "secret" "redis-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where the redis chart and the helper are used.                                                         |
+| `common.validations.values.cassandra.passwords`  | This helper will ensure the required passwords for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where the cassandra chart and the helper are used.                                                 |
+| `common.validations.values.mongodb.passwords`    | This helper will ensure the required passwords for MongoDB® are not empty. It returns a shared error for all the values.  | `dict "secret" "mongodb-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where the mongodb chart and the helper are used.                                                     |
+
+### Warnings
+
+| Helper identifier            | Description                        | Expected Input                                              |
+|------------------------------|--------------------------------------|----------------------------------------------------------------|
+| `common.warnings.rollingTag` | Warning about using a rolling tag. | `ImageRoot`, see [ImageRoot](#imageroot) for the structure.  |
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if the image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information in the logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Persistent Volume Storage Class. If set to "-", storageClassName: "" is used, which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
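+
+A chart template would typically consume this structure through the `common.storage.class` helper listed above. A sketch (assuming the helper renders the complete `storageClassName:` line, or nothing when no class is set):
+
+```yaml
+# templates/pvc.yaml (sketch)
+spec:
+  {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 2 }}
+```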
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their own existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues.
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore.
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3.
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_affinities.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_affinities.tpl
new file mode 100644
index 0000000..189ea40
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_affinities.tpl
@@ -0,0 +1,102 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+  - preference:
+      matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+    weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+  nodeSelectorTerms:
+    - matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+  {{- if eq .type "soft" }}
+    {{- include "common.affinities.nodes.soft" . -}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.nodes.hard" .
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_capabilities.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..b94212b --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_capabilities.tpl @@ -0,0 +1,128 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. 
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_errors.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+    {{- $errorString = print $errorString "\n                 Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims."
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_images.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_ingress.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..8caf73a --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. 
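+On "extensions/v1beta1" and "networking.k8s.io/v1beta1" the legacy flat
+serviceName/servicePort keys are rendered; on "networking.k8s.io/v1" a nested
+service block is rendered instead. A sketch of the v1 output for hypothetical
+inputs (dict "serviceName" "my-svc" "servicePort" 80 "context" $) would be:
+  service:
+    name: my-svc
+    port:
+      number: 80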
+ +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_labels.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_names.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_names.tpl new file mode 100644 index 0000000..cf03231 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_names.tpl @@ -0,0 +1,52 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_secrets.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..a53fb44 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. 
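+Example (hypothetical values):
+{{ include "common.secrets.name" (dict "existingSecret" "my-secret" "defaultNameSuffix" "tls" "context" $) }}
+renders "my-secret"; with existingSecret unset, it would render "<fullname>-tls" instead.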
+*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
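+Example (hypothetical secret name):
+{{- if (include "common.secrets.exists" (dict "secret" "my-release-mongodb" "context" $)) }}
+  (reuse the previously generated credentials rather than creating new ones)
+{{- end }}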
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_storage.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_tplvalues.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_utils.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..ea083a2 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_warnings.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_cassandra.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_mariadb.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_mongodb.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a071ea4 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_postgresql.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..164ec0d --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_redis.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..5d72959 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_validations.tpl b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 0000000..9a814cf
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n        %s\n" (include "common.utils.secret.getvalue" .)
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/charts/common/values.yaml b/ansible/roles/helm_install/files/mongo-dsk/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/ansible/roles/helm_install/files/mongo-dsk/override-values.yaml b/ansible/roles/helm_install/files/mongo-dsk/override-values.yaml new file mode 100644 index 0000000..806d580 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/override-values.yaml @@ -0,0 +1,49 @@ +mongodbRootPassword: "mongo#pass" +configsvr: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + + external: + rootPassword: "mongo#pass" +service: + type: NodePort + nodePort: "30112" +mongos: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid +shardsvr: + dataNode: + tolerations: + - key: "dev/data-druid" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-druid + + diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/NOTES.txt b/ansible/roles/helm_install/files/mongo-dsk/templates/NOTES.txt new file mode 100644 index 0000000..6db3cef --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/NOTES.txt @@ -0,0 +1,74 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/mongodb-sharded/entrypoint.sh /opt/bitnami/scripts/mongodb-sharded/run.sh + +{{- else }} + +The MongoDB® Sharded cluster can be accessed via the Mongos instances in port {{ .Values.service.port }} on the following DNS name from within your cluster: + + {{ include "mongodb-sharded.serviceName" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+
+To get the root password run:
+
+    export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} -o jsonpath="{.data.mongodb-root-password}" | base64 --decode)
+
+{{- if and .Values.mongodbUsername .Values.mongodbDatabase }}
+{{- if .Values.mongodbPassword }}
+
+To get the password for "{{ .Values.mongodbUsername }}" run:
+
+    export MONGODB_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} -o jsonpath="{.data.mongodb-password}" | base64 --decode)
+
+{{- end }}
+{{- end }}
+
+To connect to your database run the following command:
+
+    kubectl run --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }}-client --rm --tty -i --restart='Never' --image {{ template "mongodb-sharded.image" . }} --command -- mongo admin --host {{ include "mongodb-sharded.serviceName" . }} {{- if .Values.usePassword }} --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD{{- end }}
+
+To connect to your database from outside the cluster execute the following commands:
+
+{{- if contains "NodePort" .Values.service.type }}
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "mongodb-sharded.serviceName" . }})
+    mongo --host $NODE_IP --port $NODE_PORT --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "mongodb-sharded.serviceName" . }}'
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "mongodb-sharded.serviceName" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+    mongo --host $SERVICE_IP --port {{ .Values.service.port }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "mongodb-sharded.serviceName" . }} {{ .Values.service.port }}:{{ .Values.service.port }} &
+    mongo --host 127.0.0.1 --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD
+
+{{- end }}
+{{- end }}
+
+{{- include "mongodb-sharded.validateValues" . -}}
+{{- include "mongodb-sharded.checkRollingTags" . -}}
diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/_helpers.tpl b/ansible/roles/helm_install/files/mongo-dsk/templates/_helpers.tpl
new file mode 100644
index 0000000..679eaa9
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-dsk/templates/_helpers.tpl
@@ -0,0 +1,266 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Returns a ServiceAccount name for the specified path or falls back to `common.serviceAccount.name`
+if `common.serviceAccount.create` is set to true. Falls back to Chart's fullname otherwise.
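+For example (hypothetical values): with .value.create=false, common.serviceAccount.create=true
+and no explicit names set, this resolves to the chart's fullname; with both create flags
+false and no name it resolves to "default".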
+Usage: +{{ include "mongodb-sharded.serviceAccountName" (dict "value" .Values.path.to.serviceAccount "context" $) }} +*/}} +{{- define "mongodb-sharded.serviceAccountName" -}} +{{- if .value.create }} + {{- default (include "common.names.fullname" .context) .value.name | quote }} +{{- else if .context.Values.common.serviceAccount.create }} + {{- default (include "common.names.fullname" .context) .context.Values.common.serviceAccount.name | quote }} +{{- else -}} + {{- default "default" .value.name | quote }} +{{- end }} +{{- end }} + +{{/* +Renders a ServiceAccount for specified name. +Usage: +{{ include "mongodb-sharded.serviceaccount" (dict "value" .Values.path.to.serviceAccount "context" $) }} +*/}} +{{- define "mongodb-sharded.serviceaccount" -}} +{{- if .value.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "mongodb-sharded.serviceAccountName" (dict "value" .value "context" .context) }} + labels: + {{- include "common.labels.standard" .context | nindent 4 }} +--- +{{ end -}} +{{- end -}} + +{{- define "mongodb-sharded.secret" -}} + {{- if .Values.existingSecret -}} + {{- .Values.existingSecret -}} + {{- else }} + {{- include "common.names.fullname" . -}} + {{- end }} +{{- end -}} + +{{- define "mongodb-sharded.configServer.primaryHost" -}} + {{- if .Values.configsvr.external.host -}} + {{- .Values.configsvr.external.host }} + {{- else -}} + {{- printf "%s-configsvr-0.%s-headless.%s.svc.%s" (include "common.names.fullname" . ) (include "common.names.fullname" .) .Release.Namespace .Values.clusterDomain -}} + {{- end -}} +{{- end -}} + +{{- define "mongodb-sharded.configServer.rsName" -}} + {{- if .Values.configsvr.external.replicasetName -}} + {{- .Values.configsvr.external.replicasetName }} + {{- else }} + {{- printf "%s-configsvr" ( include "common.names.fullname" . ) -}} + {{- end }} +{{- end -}} + +{{- define "mongodb-sharded.mongos.configCM" -}} + {{- if .Values.mongos.configCM -}} + {{- .Values.mongos.configCM -}} + {{- else }} + {{- printf "%s-mongos" (include "common.names.fullname" .) -}} + {{- end }} +{{- end -}} + +{{- define "mongodb-sharded.shardsvr.dataNode.configCM" -}} + {{- if .Values.shardsvr.dataNode.configCM -}} + {{- .Values.shardsvr.dataNode.configCM -}} + {{- else }} + {{- printf "%s-shardsvr-data" (include "common.names.fullname" .) -}} + {{- end }} +{{- end -}} + +{{- define "mongodb-sharded.shardsvr.arbiter.configCM" -}} + {{- if .Values.shardsvr.arbiter.configCM -}} + {{- .Values.shardsvr.arbiter.configCM -}} + {{- else }} + {{- printf "%s-shardsvr-arbiter" (include "common.names.fullname" .) -}} + {{- end }} +{{- end -}} + +{{- define "mongodb-sharded.configsvr.configCM" -}} + {{- if .Values.configsvr.configCM -}} + {{- .Values.configsvr.configCM -}} + {{- else }} + {{- printf "%s-configsvr" (include "common.names.fullname" .) -}} + {{- end }} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "mongodb-sharded.initScriptsSecret" -}} + {{- printf "%s" (include "common.tplvalues.render" (dict "value" .Values.common.initScriptsSecret "context" $)) -}} +{{- end -}} + +{{/* +Get the initialization scripts configmap name. +*/}} +{{- define "mongodb-sharded.initScriptsCM" -}} + {{- printf "%s" (include "common.tplvalues.render" (dict "value" .Values.common.initScriptsCM "context" $)) -}} +{{- end -}} + +{{/* +Create the name for the admin secret. 
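+Resolves to auth.existingAdminSecret when set; otherwise to "<fullname>-admin"
+(e.g. a release whose fullname renders as "mongo" would yield "mongo-admin";
+the name is purely illustrative).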
+*/}} +{{- define "mongodb-sharded.adminSecret" -}} + {{- if .Values.auth.existingAdminSecret -}} + {{- .Values.auth.existingAdminSecret -}} + {{- else -}} + {{- include "common.names.fullname" . -}}-admin + {{- end -}} +{{- end -}} + +{{/* +Create the name for the key secret. +*/}} +{{- define "mongodb-sharded.keySecret" -}} + {{- if .Values.auth.existingKeySecret -}} + {{- .Values.auth.existingKeySecret -}} + {{- else -}} + {{- include "common.names.fullname" . -}}-keyfile + {{- end -}} +{{- end -}} + +{{/* +Returns the proper Service name depending if an explicit service name is set +in the values file. If the name is not explicitly set it will take the "common.names.fullname" +*/}} +{{- define "mongodb-sharded.serviceName" -}} + {{- if .Values.service.name -}} + {{ .Values.service.name }} + {{- else -}} + {{ include "common.names.fullname" . }} + {{- end -}} +{{- end -}} + +{{/* +Return the proper MongoDB® image name +*/}} +{{- define "mongodb-sharded.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "mongodb-sharded.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "mongodb-sharded.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "mongodb-sharded.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "mongodb-sharded.validateValues" -}} + {{- $messages := list -}} + {{- $messages := append $messages (include "mongodb-sharded.validateValues.mongodbCustomDatabase" .) -}} + {{- $messages := append $messages (include "mongodb-sharded.validateValues.externalCfgServer" .) -}} + {{- $messages := append $messages (include "mongodb-sharded.validateValues.replicas" .) -}} + {{- $messages := append $messages (include "mongodb-sharded.validateValues.config" .) -}} + {{- $messages := without $messages "" -}} + {{- $message := join "\n" $messages -}} + + {{- if $message -}} + {{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} + {{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - both mongodbUsername and mongodbDatabase are necessary +to create a custom user and database during 1st initialization +*/}} +{{- define "mongodb-sharded.validateValues.mongodbCustomDatabase" -}} +{{- if or (and .Values.mongodbUsername (not .Values.mongodbDatabase)) (and (not .Values.mongodbUsername) .Values.mongodbDatabase) }} +mongodb: mongodbUsername, mongodbDatabase + Both mongodbUsername and mongodbDatabase must be provided to create + a custom user and database during 1st initialization. + Please set both of them (--set mongodbUsername="xxxx",mongodbDatabase="yyyy") +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - If using an external config server, then both the host and the replicaset name should be set. 
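+In practice all four external values must be provided together, per the checks
+below, e.g. (hypothetical values):
+  configsvr:
+    external:
+      host: cfgsvr-0.cfgsvr-headless.mongodb.svc.cluster.local
+      replicasetName: cfgrs
+      rootPassword: <current root password>
+      replicasetKey: <current replica set key>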
+*/}}
+{{- define "mongodb-sharded.validateValues.externalCfgServer" -}}
+{{- if and .Values.configsvr.external.replicasetName (not .Values.configsvr.external.host) -}}
+mongodb: invalidExternalConfigServer
+    You specified a replica set name for the external config server but not a host. Set both configsvr.external.replicasetName and configsvr.external.host
+{{- end -}}
+{{- if and (not .Values.configsvr.external.replicasetName) .Values.configsvr.external.host -}}
+mongodb: invalidExternalConfigServer
+    You specified a host for the external config server but not the replica set name. Set both configsvr.external.replicasetName and configsvr.external.host
+{{- end -}}
+{{- if and .Values.configsvr.external.host (not .Values.configsvr.external.rootPassword) -}}
+mongodb: invalidExternalConfigServer
+    You specified a host for the external config server but not the root password. Set the configsvr.external.rootPassword value.
+{{- end -}}
+{{- if and .Values.configsvr.external.host (not .Values.configsvr.external.replicasetKey) -}}
+mongodb: invalidExternalConfigServer
+    You specified a host for the external config server but not the replica set key. Set the configsvr.external.replicasetKey value.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of MongoDB® - The number of shards and the number of data node replicas must be positive
+*/}}
+{{- define "mongodb-sharded.validateValues.replicas" -}}
+{{- if and (le (int .Values.shardsvr.dataNode.replicas) 0) (ge (int .Values.shards) 1) }}
+mongodb: invalidShardSvrReplicas
+    You specified an invalid number of replicas per shard. Please set shardsvr.dataNode.replicas to a positive number or set the number of shards to 0.
+{{- end -}}
+{{- if lt (int .Values.shardsvr.arbiter.replicas) 0 }}
+mongodb: invalidShardSvrArbiters
+    You specified an invalid number of arbiters per shard. Please set shardsvr.arbiter.replicas to a number greater than or equal to 0
+{{- end -}}
+{{- if and (le (int .Values.configsvr.replicas) 0) (not .Values.configsvr.external.host) }}
+mongodb: invalidConfigSvrReplicas
+    You specified an invalid number of config server replicas. Please set configsvr.replicas to a positive number or set the configsvr.external.host value to use
+    an external config server replica set
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of MongoDB® - Cannot use both .config and .configCM
+*/}}
+{{- define "mongodb-sharded.validateValues.config" -}}
+{{- if and .Values.shardsvr.dataNode.configCM .Values.shardsvr.dataNode.config }}
+mongodb: shardDataNodeConflictingConfig
+    You specified both shardsvr.dataNode.configCM and shardsvr.dataNode.config. You can only set one
+{{- end -}}
+{{- if and .Values.shardsvr.arbiter.configCM .Values.shardsvr.arbiter.config }}
+mongodb: arbiterNodeConflictingConfig
+    You specified both shardsvr.arbiter.configCM and shardsvr.arbiter.config. You can only set one
+{{- end -}}
+{{- if and .Values.mongos.configCM .Values.mongos.config }}
+mongodb: mongosNodeConflictingConfig
+    You specified both mongos.configCM and mongos.config. You can only set one
+{{- end -}}
+{{- if and .Values.configsvr.configCM .Values.configsvr.config }}
+mongodb: configSvrNodeConflictingConfig
+    You specified both configsvr.configCM and configsvr.config.
You can only set one +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "mongodb-sharded.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-configmap.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-configmap.yaml new file mode 100644 index 0000000..0073138 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-configmap.yaml @@ -0,0 +1,11 @@ +{{- if and (not .Values.configsvr.external.host) .Values.configsvr.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-configsvr + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: configsvr +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configsvr.config "context" $) | nindent 4 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-poddisruptionbudget.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-poddisruptionbudget.yaml new file mode 100644 index 0000000..6138298 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-poddisruptionbudget.yaml @@ -0,0 +1,18 @@ +{{- if and (not .Values.configsvr.external.host) .Values.configsvr.pdb.enabled -}} +kind: PodDisruptionBudget +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }}-configsvr + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: configsvr +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: configsvr + {{- if .Values.configsvr.pdb.minAvailable }} + minAvailable: {{ .Values.configsvr.pdb.minAvailable | int }} + {{- end }} + {{- if .Values.configsvr.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.configsvr.pdb.maxUnavailable | int }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-podmonitor.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-podmonitor.yaml new file mode 100644 index 0000000..375c6e8 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-podmonitor.yaml @@ -0,0 +1,32 @@ +{{- if and (not .Values.configsvr.external.host) (and .Values.metrics.enabled .Values.metrics.podMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ include "common.names.fullname" . }}-configsvr + {{- if .Values.metrics.podMonitor.namespace }} + namespace: {{ .Values.metrics.podMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: configsvr + {{- if .Values.metrics.podMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} +spec: + podMetricsEndpoints: + - port: metrics + path: /metrics + {{- if .Values.metrics.podMonitor.interval }} + interval: {{ .Values.metrics.podMonitor.interval }} + {{- end }} + {{- if .Values.metrics.podMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.podMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: configsvr +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-statefulset.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-statefulset.yaml new file mode 100644 index 0000000..4d4458c --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/config-server/config-server-statefulset.yaml @@ -0,0 +1,376 @@ +{{- if not .Values.configsvr.external.host }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "common.names.fullname" . }}-configsvr + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: configsvr +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: configsvr + serviceName: {{ include "common.names.fullname" . }}-headless + replicas: {{ .Values.configsvr.replicas }} + podManagementPolicy: {{ .Values.configsvr.podManagementPolicy }} + updateStrategy: {{- toYaml .Values.configsvr.updateStrategy | nindent 4 }} + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: configsvr + {{- if .Values.configsvr.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.configsvr.podLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if or .Values.common.podAnnotations .Values.configsvr.podAnnotations .Values.metrics.enabled }} + annotations: + {{- if .Values.common.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.common.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.configsvr.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.configsvr.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.enabled }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- if .Values.common.schedulerName }} + schedulerName: {{ .Values.common.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ include "mongodb-sharded.serviceAccountName" (dict "value" .Values.configsvr.serviceAccount "context" $) }} + {{- if .Values.configsvr.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.configsvr.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.configsvr.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.configsvr.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.configsvr.podAffinityPreset "component" "configsvr" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.configsvr.podAntiAffinityPreset "component" "configsvr" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.configsvr.nodeAffinityPreset.type "key" .Values.configsvr.nodeAffinityPreset.key "values" .Values.configsvr.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.configsvr.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.configsvr.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.configsvr.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.configsvr.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.configsvr.priorityClassName }} + priorityClassName: {{ .Values.configsvr.priorityClassName | quote }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- include "mongodb-sharded.imagePullSecrets" . | nindent 6 }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.configsvr.persistence.enabled }} + - name: volume-permissions + image: {{ include "mongodb-sharded.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.configsvr.persistence.mountPath }}"] + securityContext: + runAsUser: 0 + resources: {{ toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.configsvr.persistence.mountPath }} + {{- end }} + {{- with .Values.configsvr.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . 
"context" $ ) | nindent 8 }} + {{- end }} + {{- with .Values.common.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + containers: + - name: mongodb + image: {{ include "mongodb-sharded.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + ports: + - containerPort: {{ .Values.common.containerPorts.mongo }} + name: mongodb + env: + - name: MONGODB_ENABLE_NUMACTL + value: {{ ternary "yes" "no" .Values.common.mongodbEnableNumactl | quote }} + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.common.mongodbSystemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + {{- if .Values.mongodbDisableSystemLog }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_MAX_TIMEOUT + value: {{ .Values.common.mongodbMaxWaitTimeout | quote }} + - name: MONGODB_SHARDING_MODE + value: "configsvr" + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_PORT_NUMBER + value: {{ .Values.common.containerPorts.mongo | quote }} + - name: MONGODB_INITIAL_PRIMARY_HOST + value: {{ include "mongodb-sharded.configServer.primaryHost" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ printf "%s-configsvr" ( include "common.names.fullname" . ) }} + {{- if .Values.common.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ include "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePasswordFile }} + - name: MONGODB_ROOT_PASSWORD_FILE + value: "/bitnami/mongodb/secrets/mongodb-root-password" + - name: MONGODB_REPLICA_SET_KEY_FILE + value: "/bitnami/mongodb/secrets/mongodb-replica-set-key" + {{- else }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" . }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" . 
}} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.common.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + {{- if .Values.common.mongodbDirectoryPerDB }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + {{- if .Values.configsvr.mongodbExtraFlags }} + - name: MONGODB_EXTRA_FLAGS + value: {{ .Values.configsvr.mongodbExtraFlags | join " " | quote }} + {{- end }} + {{- if .Values.common.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.common.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.configsvr.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.configsvr.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.common.extraEnvVarsCM .Values.common.extraEnvVarsSecret .Values.configsvr.extraEnvVarsCM .Values.configsvr.extraEnvVarsSecret }} + envFrom: + {{- if .Values.common.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.common.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.common.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.common.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if .Values.configsvr.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.configsvr.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.configsvr.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.configsvr.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - /entrypoint/replicaset-entrypoint.sh + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - pgrep + - mongod + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --disableImplicitSessions + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- end }} + volumeMounts: + - name: replicaset-entrypoint-configmap + mountPath: /entrypoint + - name: datadir + mountPath: {{ .Values.configsvr.persistence.mountPath }} + {{- if or .Values.configsvr.config .Values.configsvr.configCM }} + - name: config + mountPath: /bitnami/mongodb/conf/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + {{- if .Values.common.initScriptsCM }} + - 
name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/cm + {{- end }} + {{- if .Values.common.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if .Values.common.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.common.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.configsvr.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.configsvr.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.configsvr.resources | nindent 12 }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "mongodb-sharded.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + {{- if .Values.usePasswordFile }} + - name: MONGODB_ROOT_PASSWORD_FILE + value: "/bitnami/mongodb/secrets/mongodb-root-password" + {{- else }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" . }} + key: mongodb-root-password + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - sh + - -ec + - |- + #!/bin/sh + {{- if .Values.usePasswordFile }} + export MONGODB_ROOT_PASSWORD="$(cat "${MONGODB_ROOT_PASSWORD_FILE}")" + {{- end }} + /bin/mongodb_exporter --web.listen-address ":{{ .Values.metrics.containerPort }}" --mongodb.uri mongodb://root:`echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g"`@localhost:{{ .Values.service.port }}/admin{{ ternary "?ssl=true" "" $.Values.metrics.useTLS }} {{ .Values.metrics.extraArgs }} + {{- end }} + {{- if .Values.usePasswordFile }} + volumeMounts: + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + {{- end }} + {{- end }} + resources: {{ toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- with .Values.configsvr.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" . 
"context" $ ) | nindent 8 }} + {{- end }} + {{- with .Values.common.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + volumes: + - name: replicaset-entrypoint-configmap + configMap: + name: {{ include "common.names.fullname" . }}-replicaset-entrypoint + {{- if .Values.usePasswordFile }} + - name: secrets + secret: + secretName: {{ include "mongodb-sharded.secret" . }} + {{- end }} + {{- if or .Values.configsvr.config .Values.configsvr.configCM }} + - name: config + configMap: + name: {{ include "mongodb-sharded.configsvr.configCM" . }} + {{- end }} + {{- if .Values.common.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ include "mongodb-sharded.initScriptsCM" . }} + defaultMode: 0755 + {{- end }} + {{- if .Values.common.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ include "mongodb-sharded.initScriptsSecret" . }} + defaultMode: 0755 + {{- end }} + {{- if .Values.common.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.common.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.configsvr.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.configsvr.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.configsvr.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.configsvr.persistence.annotations }} + {{ $key }}: "{{ $value }}" + {{- end }} + spec: + accessModes: + {{- range .Values.configsvr.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.configsvr.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.configsvr.persistence "global" .Values.global) | nindent 8 }} + {{- else }} + - name: datadir + emptyDir: {} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/extra-list.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/headless-service.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/headless-service.yaml new file mode 100644 index 0000000..6f9c7d8 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/headless-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . }}-headless + labels: {{- include "common.labels.standard" . | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: mongodb + port: {{ .Values.service.port }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-configmap.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-configmap.yaml new file mode 100644 index 0000000..8fb01a5 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-configmap.yaml @@ -0,0 +1,11 @@ +{{- if .Values.mongos.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-mongos + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongos +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.mongos.config "context" $) | nindent 4 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-dep-sts.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-dep-sts.yaml new file mode 100644 index 0000000..1b89fde --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-dep-sts.yaml @@ -0,0 +1,319 @@ +apiVersion: {{ if .Values.mongos.useStatefulSet }}{{ include "common.capabilities.statefulset.apiVersion" . }}{{- else }}{{ include "common.capabilities.deployment.apiVersion" . }}{{- end }} +kind: {{ if .Values.mongos.useStatefulSet }}StatefulSet{{- else }}Deployment{{- end }} +metadata: + name: {{ include "common.names.fullname" . }}-mongos + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongos +spec: + {{- if .Values.mongos.useStatefulSet }} + serviceName: {{ include "mongodb-sharded.serviceName" . }} + podManagementPolicy: {{ .Values.mongos.podManagementPolicy }} + updateStrategy: + {{- else }} + strategy: + {{- end }} + {{- toYaml .Values.mongos.updateStrategy | nindent 4 }} + replicas: {{ .Values.mongos.replicas }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: mongos + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: mongos + {{- if .Values.mongos.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.mongos.podLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if or .Values.common.podAnnotations .Values.mongos.podAnnotations .Values.metrics.enabled }} + annotations: + {{- if .Values.common.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.common.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.mongos.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.mongos.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.enabled }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- if .Values.common.schedulerName }} + schedulerName: {{ .Values.common.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ include "mongodb-sharded.serviceAccountName" (dict "value" $.Values.mongos.serviceAccount "context" $) }} + {{- if .Values.mongos.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.mongos.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.mongos.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.mongos.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.mongos.podAffinityPreset "component" "mongos" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.mongos.podAntiAffinityPreset "component" "mongos" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.mongos.nodeAffinityPreset.type "key" .Values.mongos.nodeAffinityPreset.key "values" .Values.mongos.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.mongos.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.mongos.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.mongos.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.mongos.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.mongos.priorityClassName }} + priorityClassName: {{ .Values.mongos.priorityClassName | quote }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- include "mongodb-sharded.imagePullSecrets" . | nindent 6 }} + {{- if or $.Values.mongos.initContainers $.Values.common.initContainers }} + initContainers: + {{- with $.Values.mongos.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + {{- with $.Values.common.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: mongos + image: {{ include "mongodb-sharded.image" . 
}}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.securityContext.enabled }}
+          securityContext:
+            runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }}
+            runAsUser: {{ .Values.securityContext.runAsUser }}
+          {{- end }}
+          env:
+            - name: MONGODB_ENABLE_NUMACTL
+              value: {{ ternary "yes" "no" $.Values.common.mongodbEnableNumactl | quote }}
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+            - name: MONGODB_SHARDING_MODE
+              value: "mongos"
+            - name: MONGODB_MAX_TIMEOUT
+              value: {{ .Values.common.mongodbMaxWaitTimeout | quote }}
+            {{- if .Values.usePasswordFile }}
+            - name: MONGODB_ROOT_PASSWORD_FILE
+              value: "/bitnami/mongodb/secrets/mongodb-root-password"
+            - name: MONGODB_REPLICA_SET_KEY_FILE
+              value: "/bitnami/mongodb/secrets/mongodb-replica-set-key"
+            {{- else }}
+            - name: MONGODB_ROOT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "mongodb-sharded.secret" . }}
+                  key: mongodb-root-password
+            - name: MONGODB_REPLICA_SET_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "mongodb-sharded.secret" . }}
+                  key: mongodb-replica-set-key
+            {{- end }}
+            - name: MONGODB_POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            {{- if .Values.common.useHostnames }}
+            - name: MONGODB_ADVERTISED_HOSTNAME
+              value: "$(MONGODB_POD_NAME)"
+            {{- end }}
+            - name: MONGODB_PORT_NUMBER
+              value: {{ $.Values.common.containerPorts.mongo | quote }}
+            - name: MONGODB_CFG_PRIMARY_HOST
+              value: {{ include "mongodb-sharded.configServer.primaryHost" . }}
+            - name: MONGODB_CFG_REPLICA_SET_NAME
+              value: {{ include "mongodb-sharded.configServer.rsName" . }}
+            - name: MONGODB_SYSTEM_LOG_VERBOSITY
+              value: {{ .Values.common.mongodbSystemLogVerbosity | quote }}
+            - name: MONGODB_DISABLE_SYSTEM_LOG
+              {{- if .Values.mongodbDisableSystemLog }}
+              value: "yes"
+              {{- else }}
+              value: "no"
+              {{- end }}
+            - name: MONGODB_ENABLE_IPV6
+              {{- if .Values.common.mongodbEnableIPv6 }}
+              value: "yes"
+              {{- else }}
+              value: "no"
+              {{- end }}
+            - name: MONGODB_ENABLE_DIRECTORY_PER_DB
+              {{- if .Values.common.mongodbDirectoryPerDB }}
+              value: "yes"
+              {{- else }}
+              value: "no"
+              {{- end }}
+            {{- if .Values.mongos.mongodbExtraFlags }}
+            - name: MONGODB_EXTRA_FLAGS
+              value: {{ .Values.mongos.mongodbExtraFlags | join " " | quote }}
+            {{- end }}
+            {{- if .Values.common.extraEnvVars }}
+            {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraEnvVars "context" $ ) | nindent 12 }}
+            {{- end }}
+            {{- if .Values.mongos.extraEnvVars }}
+            {{- include "common.tplvalues.render" ( dict "value" $.Values.mongos.extraEnvVars "context" $ ) | nindent 12 }}
+            {{- end }}
+          {{- if or .Values.common.extraEnvVarsCM .Values.common.extraEnvVarsSecret .Values.mongos.extraEnvVarsCM .Values.mongos.extraEnvVarsSecret }}
+          envFrom:
+            {{- if .Values.common.extraEnvVarsCM }}
+            - configMapRef:
+                name: {{ include "common.tplvalues.render" ( dict "value" .Values.common.extraEnvVarsCM "context" $ ) }}
+            {{- end }}
+            {{- if .Values.common.extraEnvVarsSecret }}
+            - secretRef:
+                name: {{ include "common.tplvalues.render" ( dict "value" .Values.common.extraEnvVarsSecret "context" $ ) }}
+            {{- end }}
+            {{- if .Values.mongos.extraEnvVarsCM }}
+            - configMapRef:
+                name: {{ include "common.tplvalues.render" ( dict "value" .Values.mongos.extraEnvVarsCM "context" $ ) }}
+            {{- end }}
+            {{- if .Values.mongos.extraEnvVarsSecret }}
+            - secretRef:
+                name: {{ include "common.tplvalues.render" ( dict "value" .Values.mongos.extraEnvVarsSecret "context" $ ) }}
+
{{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + ports: + - name: mongodb + containerPort: {{ $.Values.common.containerPorts.mongo }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --disableImplicitSessions + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --disableImplicitSessions + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + {{- if or .Values.mongos.config .Values.mongos.configCM }} + - name: config + mountPath: /bitnami/mongodb/conf/ + {{- end }} + {{- if $.Values.common.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if $.Values.mongos.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.mongos.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.mongos.resources | nindent 12 }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "mongodb-sharded.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + {{- if .Values.usePasswordFile }} + - name: MONGODB_ROOT_PASSWORD_FILE + value: "/bitnami/mongodb/secrets/mongodb-root-password" + {{- else }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" . 
}}
+                  key: mongodb-root-password
+            {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- else }}
+          command:
+            - sh
+            - -ec
+            - |-
+              #!/bin/sh
+              {{- if .Values.usePasswordFile }}
+              export MONGODB_ROOT_PASSWORD="$(cat "${MONGODB_ROOT_PASSWORD_FILE}")"
+              {{- end }}
+              /bin/mongodb_exporter --web.listen-address ":{{ .Values.metrics.containerPort }}" --mongodb.uri mongodb://root:`echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g"`@localhost:{{ .Values.service.port }}/admin{{ ternary "?ssl=true" "" $.Values.metrics.useTLS }} {{ .Values.metrics.extraArgs }}
+          {{- end }}
+          {{- if .Values.usePasswordFile }}
+          volumeMounts:
+            - name: secrets
+              mountPath: /bitnami/mongodb/secrets/
+          {{- end }}
+          ports:
+            - name: metrics
+              containerPort: {{ .Values.metrics.containerPort }}
+          {{- if not .Values.diagnosticMode.enabled }}
+          {{- if .Values.metrics.livenessProbe.enabled }}
+          livenessProbe:
+            httpGet:
+              path: /metrics
+              port: metrics
+            initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}
+            failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }}
+            successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }}
+          {{- end }}
+          {{- if .Values.metrics.readinessProbe.enabled }}
+          readinessProbe:
+            httpGet:
+              path: /metrics
+              port: metrics
+            initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}
+            failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }}
+            successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }}
+          {{- end }}
+          {{- end }}
+          resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+        {{- end }}
+        {{- with $.Values.mongos.sidecars }}
+        {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }}
+        {{- end }}
+        {{- with $.Values.common.sidecars }}
+        {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }}
+        {{- end }}
+      volumes:
+        {{- if .Values.usePasswordFile }}
+        - name: secrets
+          secret:
+            secretName: {{ include "mongodb-sharded.secret" . }}
+        {{- end }}
+        {{- if or .Values.mongos.config .Values.mongos.configCM }}
+        - name: config
+          configMap:
+            name: {{ include "mongodb-sharded.mongos.configCM" .
}} + {{- end }} + {{- if $.Values.common.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.mongos.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.mongos.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-poddisruptionbudget.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-poddisruptionbudget.yaml new file mode 100644 index 0000000..e7d30fe --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-poddisruptionbudget.yaml @@ -0,0 +1,18 @@ +{{- if .Values.mongos.pdb.enabled -}} +kind: PodDisruptionBudget +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }}-mongos + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongos +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: mongos + {{- if .Values.mongos.pdb.minAvailable }} + minAvailable: {{ .Values.mongos.pdb.minAvailable | int }} + {{- end }} + {{- if .Values.mongos.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.mongos.pdb.maxUnavailable | int }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-podmonitor.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-podmonitor.yaml new file mode 100644 index 0000000..8b1bc19 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-podmonitor.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.metrics.enabled .Values.metrics.podMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ include "common.names.fullname" . }}-mongos + {{- if .Values.metrics.podMonitor.namespace }} + namespace: {{ .Values.metrics.podMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongos + {{- if .Values.metrics.podMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} +spec: + podMetricsEndpoints: + - port: metrics + path: /metrics + {{- if .Values.metrics.podMonitor.interval }} + interval: {{ .Values.metrics.podMonitor.interval }} + {{- end }} + {{- if .Values.metrics.podMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.podMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }}
+      app.kubernetes.io/component: mongos
+{{- end }}
diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-service-per-replica.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-service-per-replica.yaml
new file mode 100644
index 0000000..6364892
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-service-per-replica.yaml
@@ -0,0 +1,48 @@
+{{- if and .Values.mongos.useStatefulSet .Values.mongos.servicePerReplica.enabled }}
+{{- range $i := until (.Values.mongos.replicas | int) }}
+{{- $context := deepCopy $ | merge (dict "index" $i) }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "mongodb-sharded.serviceName" $ }}-{{ $i }}
+  labels: {{ include "common.labels.standard" $ | nindent 4 }}
+    app.kubernetes.io/component: mongos
+  annotations: {{- include "common.tplvalues.render" (dict "value" $.Values.mongos.servicePerReplica.annotations "context" $context) | nindent 4 }}
+spec:
+  type: {{ $.Values.mongos.servicePerReplica.type }}
+  {{- if and $.Values.mongos.servicePerReplica.loadBalancerIP (eq $.Values.mongos.servicePerReplica.type "LoadBalancer") }}
+  loadBalancerIP: {{ $.Values.mongos.servicePerReplica.loadBalancerIP }}
+  {{- end }}
+  {{- if and (eq $.Values.mongos.servicePerReplica.type "LoadBalancer") $.Values.mongos.servicePerReplica.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{ with $.Values.mongos.servicePerReplica.loadBalancerSourceRanges }}
+  {{ include "common.tplvalues.render" (dict "value" . "context" $) | nindent 4 }}
+  {{- end }}
+  {{- end }}
+  {{- if and (eq $.Values.mongos.servicePerReplica.type "ClusterIP") $.Values.mongos.servicePerReplica.clusterIP }}
+  clusterIP: {{ $.Values.mongos.servicePerReplica.clusterIP }}
+  {{- end }}
+  ports:
+    - name: mongodb
+      port: {{ $.Values.mongos.servicePerReplica.port }}
+      targetPort: mongodb
+      {{- if $.Values.mongos.servicePerReplica.nodePort }}
+      nodePort: {{ $.Values.mongos.servicePerReplica.nodePort }}
+      {{- else if eq $.Values.mongos.servicePerReplica.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- if $.Values.metrics.enabled }}
+    - name: metrics
+      port: 9216
+      targetPort: metrics
+    {{- end }}
+    {{- if $.Values.mongos.servicePerReplica.extraPorts }}
+    {{- include "common.tplvalues.render" (dict "value" $.Values.mongos.servicePerReplica.extraPorts "context" $context) | nindent 4 }}
+    {{- end }}
+  selector: {{ include "common.labels.matchLabels" $ | nindent 4 }}
+    app.kubernetes.io/component: mongos
+    statefulset.kubernetes.io/pod-name: {{ include "common.names.fullname" $ }}-mongos-{{ $i }}
+  sessionAffinity: {{ default "None" $.Values.mongos.servicePerReplica.sessionAffinity }}
+{{- end }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-service.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-service.yaml
new file mode 100644
index 0000000..4666378
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-dsk/templates/mongos/mongos-service.yaml
@@ -0,0 +1,41 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "mongodb-sharded.serviceName" . }}
+  labels: {{ include "common.labels.standard" .
| nindent 4 }} + app.kubernetes.io/component: mongos + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ with .Values.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} + {{- end }} + {{- end }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + ports: + - name: mongodb + port: {{ .Values.service.port }} + targetPort: mongodb + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + port: 9216 + targetPort: metrics + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: mongos + sessionAffinity: {{ default "None" .Values.service.sessionAffinity }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/replicaset-entrypoint-configmap.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/replicaset-entrypoint-configmap.yaml new file mode 100644 index 0000000..7df7c0e --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/replicaset-entrypoint-configmap.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-replicaset-entrypoint + labels: {{- include "common.labels.standard" . | nindent 4 }} +data: + replicaset-entrypoint.sh: |- + #!/bin/bash + + sleep 5 + + . /liblog.sh + + # Perform adaptations depending on the host name + if [[ $HOSTNAME =~ (.*)-0$ ]]; then + info "Setting node as primary" + export MONGODB_REPLICA_SET_MODE=primary + else + info "Setting node as secondary" + export MONGODB_REPLICA_SET_MODE=secondary + {{- if .Values.usePasswordFile }} + export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD_FILE="$MONGODB_ROOT_PASSWORD_FILE" + unset MONGODB_ROOT_PASSWORD_FILE + {{- else }} + export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD="$MONGODB_ROOT_PASSWORD" + unset MONGODB_ROOT_PASSWORD + {{- end }} + fi + + exec /entrypoint.sh /run.sh diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/secrets.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/secrets.yaml new file mode 100644 index 0000000..f7839ae --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/secrets.yaml @@ -0,0 +1,30 @@ +{{- if not .Values.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . 
}} + labels: {{- include "common.labels.standard" $ | nindent 4 }} +type: Opaque +data: + {{- if .Values.configsvr.external.rootPassword }} + mongodb-root-password: {{ .Values.configsvr.external.rootPassword | b64enc | quote }} + {{- else if .Values.mongodbRootPassword }} + mongodb-root-password: {{ .Values.mongodbRootPassword | b64enc | quote }} + {{- else }} + mongodb-root-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + {{- if .Values.mongodbPassword }} + mongodb-password: {{ .Values.mongodbPassword | b64enc | quote }} + {{- else }} + mongodb-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if .Values.configsvr.external.replicasetKey }} + mongodb-replica-set-key: {{ .Values.configsvr.external.replicasetKey | b64enc | quote }} + {{- else if .Values.replicaSetKey }} + mongodb-replica-set-key: {{ .Values.replicaSetKey | b64enc | quote }} + {{- else }} + mongodb-replica-set-key: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/serviceaccount.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/serviceaccount.yaml new file mode 100644 index 0000000..676c09a --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +{{ include "mongodb-sharded.serviceaccount" (dict "value" .Values.common.serviceAccount "context" $) }} +{{ include "mongodb-sharded.serviceaccount" (dict "value" .Values.mongos.serviceAccount "context" $) }} +{{ include "mongodb-sharded.serviceaccount" (dict "value" .Values.configsvr.serviceAccount "context" $) }} +{{ include "mongodb-sharded.serviceaccount" (dict "value" .Values.shardsvr.arbiter.serviceAccount "context" $) }} +{{ include "mongodb-sharded.serviceaccount" (dict "value" .Values.shardsvr.dataNode.serviceAccount "context" $) }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-arbiter-configmap.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-arbiter-configmap.yaml new file mode 100644 index 0000000..ef84cb5 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-arbiter-configmap.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.shards .Values.shardsvr.arbiter.replicas .Values.shardsvr.arbiter.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-shardsvr-arbiter + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: shardsvr-arbiter +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.shardsvr.arbiter.config "context" $) | nindent 4 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-arbiter-statefulset.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-arbiter-statefulset.yaml new file mode 100644 index 0000000..c48dfe0 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-arbiter-statefulset.yaml @@ -0,0 +1,337 @@ +{{- if and .Values.shards .Values.shardsvr.arbiter.replicas }} +{{- $replicas := $.Values.shards | int }} +{{- range $i, $e := until $replicas }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ printf "%s-shard%d-arbiter" (include "common.names.fullname" $ ) $i }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: shardsvr-arbiter +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" $ | nindent 6 }} + app.kubernetes.io/component: shardsvr-arbiter + podManagementPolicy: {{ $.Values.shardsvr.arbiter.podManagementPolicy }} + updateStrategy: {{- toYaml $.Values.shardsvr.arbiter.updateStrategy | nindent 4 }} + serviceName: {{ include "common.names.fullname" $ }}-headless + replicas: {{ $.Values.shardsvr.arbiter.replicas }} + template: + metadata: + labels: {{- include "common.labels.standard" $ | nindent 8 }} + app.kubernetes.io/component: shardsvr-arbiter + {{- if $.Values.shardsvr.arbiter.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.arbiter.podLabels "context" $ ) | nindent 8 }} + {{- end }} + shard: {{ $i | quote }} + {{- if or $.Values.common.podAnnotations $.Values.shardsvr.arbiter.podAnnotations $.Values.metrics.enabled }} + annotations: + {{- if $.Values.common.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.arbiter.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.metrics.enabled }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + spec: + serviceAccountName: {{ include "mongodb-sharded.serviceAccountName" (dict "value" $.Values.shardsvr.arbiter.serviceAccount "context" $) }} + {{- if $.Values.common.schedulerName }} + schedulerName: {{ $.Values.common.schedulerName | quote }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.arbiter.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.arbiter.affinity "context" (set $ "arbiterLoopId" $i)) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" $.Values.shardsvr.arbiter.podAffinityPreset "component" "shardsvr-arbiter" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" $.Values.shardsvr.arbiter.podAntiAffinityPreset "component" "shardsvr-arbiter" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" $.Values.shardsvr.arbiter.nodeAffinityPreset.type 
"key" $.Values.shardsvr.arbiter.nodeAffinityPreset.key "values" $.Values.shardsvr.arbiter.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.arbiter.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.arbiter.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.priorityClassName }} + priorityClassName: {{ $.Values.shardsvr.arbiter.priorityClassName | quote }} + {{- end }} + {{- if $.Values.securityContext.enabled }} + securityContext: + fsGroup: {{ $.Values.securityContext.fsGroup }} + {{- end }} + {{- include "mongodb-sharded.imagePullSecrets" $ | nindent 6 }} + {{- if or $.Values.shardsvr.arbiter.initContainers $.Values.common.initContainers }} + initContainers: + {{- with $.Values.shardsvr.arbiter.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + {{- with $.Values.common.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: {{ include "common.names.fullname" $ }}-arbiter + image: {{ include "mongodb-sharded.image" $ }} + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- if $.Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ $.Values.securityContext.runAsNonRoot }} + runAsUser: {{ $.Values.securityContext.runAsUser }} + {{- end }} + ports: + - containerPort: {{ $.Values.common.containerPorts.mongo }} + name: mongodb + env: + - name: MONGODB_ENABLE_NUMACTL + value: {{ ternary "yes" "no" $.Values.common.mongodbEnableNumactl | quote }} + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or $.Values.image.debug $.Values.diagnosticMode.enabled) | quote }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ $.Values.common.mongodbSystemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + {{- if $.Values.common.mongodbDisableSystemLog }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_MAX_TIMEOUT + value: {{ $.Values.common.mongodbMaxWaitTimeout | quote }} + - name: MONGODB_SHARDING_MODE + value: "shardsvr" + - name: MONGODB_REPLICA_SET_MODE + value: "arbiter" + - name: MONGODB_PORT_NUMBER + value: {{ $.Values.common.containerPorts.mongo | quote }} + - name: MONGODB_INITIAL_PRIMARY_HOST + value: {{ printf "%s-shard%d-data-0.%s-headless.%s.svc.%s" (include "common.names.fullname" $ ) $i (include "common.names.fullname" $ ) $.Release.Namespace $.Values.clusterDomain }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ printf "%s-shard-%d" ( include "common.names.fullname" $ ) $i }} + {{- if $.Values.common.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ include "common.names.fullname" $ }}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}" + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if $.Values.common.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + {{- if $.Values.common.mongodbDirectoryPerDB }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + {{- if $.Values.usePasswordFile }} + - name: MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD_FILE + value: 
"/bitnami/mongodb/secrets/mongodb-root-password" + - name: MONGODB_REPLICA_SET_KEY_FILE + value: "/bitnami/mongodb/secrets/mongodb-replica-set-key" + {{- else }} + - name: MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" $ }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" $ }} + key: mongodb-replica-set-key + {{- end }} + {{- if $.Values.shardsvr.arbiter.mongodbExtraFlags }} + - name: MONGODB_EXTRA_FLAGS + value: {{ $.Values.shardsvr.arbiter.mongodbExtraFlags | join " " | quote }} + {{- end }} + {{- if $.Values.common.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.arbiter.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or $.Values.common.extraEnvVarsCM $.Values.common.extraEnvVarsSecret $.Values.shardsvr.arbiter.extraEnvVarsCM $.Values.shardsvr.arbiter.extraEnvVarsSecret }} + envFrom: + {{- if $.Values.common.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" $.Values.common.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if $.Values.common.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" $.Values.common.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.arbiter.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.extraEnvVarsSecret }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.arbiter.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + {{- if $.Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + {{- if not $.Values.diagnosticMode.enabled }} + {{- if $.Values.livenessProbe.enabled }} + livenessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ $.Values.livenessProbe.successThreshold }} + failureThreshold: {{ $.Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if $.Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ $.Values.readinessProbe.successThreshold }} + failureThreshold: {{ $.Values.readinessProbe.failureThreshold }} + {{- end }} + {{- end }} + volumeMounts: + {{- if or $.Values.shardsvr.arbiter.config $.Values.shardsvr.arbiter.configCM }} + - name: config + mountPath: /bitnami/mongodb/conf/ + {{- end }} + {{- if $.Values.usePasswordFile }} + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + {{- if $.Values.common.initScriptsCM }} + - name: custom-init-scripts-cm + 
mountPath: /docker-entrypoint-initdb.d/cm + {{- end }} + {{- if $.Values.common.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if $.Values.common.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.arbiter.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + resources: {{- toYaml $.Values.shardsvr.arbiter.resources | nindent 12 }} + {{- if $.Values.metrics.enabled }} + - name: metrics + image: {{ include "mongodb-sharded.metrics.image" $ }} + imagePullPolicy: {{ $.Values.metrics.image.pullPolicy | quote }} + {{- if $.Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ $.Values.securityContext.runAsNonRoot }} + runAsUser: {{ $.Values.securityContext.runAsUser }} + {{- end }} + env: + {{- if $.Values.usePasswordFile }} + - name: MONGODB_ROOT_PASSWORD_FILE + value: "/bitnami/mongodb/secrets/mongodb-root-password" + {{- else }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" $ }} + key: mongodb-root-password + {{- end }} + {{- if $.Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - sh + - -ec + - |- + #!/bin/sh + {{- if $.Values.usePasswordFile }} + export MONGODB_ROOT_PASSWORD="$(cat "${MONGODB_ROOT_PASSWORD_FILE}")" + {{- end }} + /bin/mongodb_exporter --web.listen-address ":{{ $.Values.metrics.containerPort }}" --mongodb.uri mongodb://root:`echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g"`@localhost:{{ $.Values.service.port }}/admin{{ ternary "?ssl=true" "" $.Values.metrics.useTLS }} {{ $.Values.metrics.extraArgs }} + {{- end }} + {{- if $.Values.usePasswordFile }} + volumeMounts: + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + ports: + - name: metrics + containerPort: {{ $.Values.metrics.containerPort }} + {{- if not $.Values.diagnosticMode.enabled }} + {{- if $.Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ $.Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.metrics.livenessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.metrics.livenessProbe.failureThreshold }} + successThreshold: {{ $.Values.metrics.livenessProbe.successThreshold }} + {{- end }} + {{- if $.Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ $.Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.metrics.readinessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.metrics.readinessProbe.failureThreshold }} + successThreshold: {{ $.Values.metrics.readinessProbe.successThreshold }} + {{- end }} + {{- end }} + resources: {{ toYaml $.Values.metrics.resources | nindent 12 }} + {{- end }} + {{- with $.Values.shardsvr.arbiter.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" . 
"context" $ ) | nindent 8 }} + {{- end }} + {{- with $.Values.common.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + volumes: + {{- if or $.Values.shardsvr.arbiter.config $.Values.shardsvr.arbiter.configCM }} + - name: config + configMap: + name: {{ include "mongodb-sharded.shardsvr.arbiter.configCM" $ }} + {{- end }} + {{- if $.Values.usePasswordFile }} + - name: secrets + secret: + secretName: {{ include "mongodb-sharded.secret" $ }} + {{- end }} + {{- if $.Values.common.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ include "mongodb-sharded.initScriptsCM" $ }} + defaultMode: 0755 + {{- end }} + {{- if $.Values.common.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ include "mongodb-sharded.initScriptsSecret" $ }} + defaultMode: 0755 + {{- end }} + {{- if $.Values.common.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.arbiter.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} +{{- if lt $i (sub $replicas 1) }} +--- +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-configmap.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-configmap.yaml new file mode 100644 index 0000000..95ac001 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-configmap.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.shards .Values.shardsvr.dataNode.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-shardsvr-data + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: shardsvr +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.shardsvr.dataNode.config "context" $) | nindent 4 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-poddisruptionbudget.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-poddisruptionbudget.yaml new file mode 100644 index 0000000..3cd357e --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-poddisruptionbudget.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.shards .Values.shardsvr.dataNode.pdb.enabled -}} +{{- $replicas := .Values.shards | int -}} +{{- range $i, $e := until $replicas -}} +kind: PodDisruptionBudget +apiVersion: {{ include "common.capabilities.policy.apiVersion" . 
}} +metadata: + name: {{ printf "%s-shard%d-data" (include "common.names.fullname" $ ) $i }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: shardsvr +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" $ | nindent 6 }} + app.kubernetes.io/component: shardsvr + shard: {{ $i | quote }} + {{- if $.Values.shardsvr.dataNode.pdb.minAvailable }} + minAvailable: {{ $.Values.shardsvr.dataNode.pdb.minAvailable | int }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.pdb.maxUnavailable }} + maxUnavailable: {{ $.Values.shardsvr.dataNode.pdb.maxUnavailable | int }} + {{- end }} +--- +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-podmonitor.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-podmonitor.yaml new file mode 100644 index 0000000..3c689fc --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-podmonitor.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.shards .Values.metrics.enabled .Values.metrics.podMonitor.enabled }} +{{- $replicas := .Values.shards | int }} +{{- range $i, $e := until $replicas }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ printf "%s-shard%d-data" (include "common.names.fullname" $ ) $i }} + {{- if $.Values.metrics.podMonitor.namespace }} + namespace: {{ $.Values.metrics.podMonitor.namespace }} + {{- else }} + namespace: {{ $.Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: shardsvr + {{- if $.Values.metrics.podMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" $.Values.metrics.podMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} +spec: + podMetricsEndpoints: + - port: metrics + path: /metrics + {{- if $.Values.metrics.podMonitor.interval }} + interval: {{ $.Values.metrics.podMonitor.interval }} + {{- end }} + {{- if $.Values.metrics.podMonitor.scrapeTimeout }} + scrapeTimeout: {{ $.Values.metrics.podMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ $.Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" $ | nindent 6 }} + app.kubernetes.io/component: shardsvr +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-statefulset.yaml b/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-statefulset.yaml new file mode 100644 index 0000000..f1f04d2 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/templates/shard/shard-data-statefulset.yaml @@ -0,0 +1,387 @@ +{{- if .Values.shards }} +{{- $replicas := .Values.shards | int }} +{{- range $i, $e := until $replicas }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ printf "%s-shard%d-data" (include "common.names.fullname" $ ) $i }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: shardsvr +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" $ | nindent 6 }} + app.kubernetes.io/component: shardsvr + podManagementPolicy: {{ $.Values.shardsvr.dataNode.podManagementPolicy }} + updateStrategy: {{- toYaml $.Values.shardsvr.dataNode.updateStrategy | nindent 4 }} + serviceName: {{ include "common.names.fullname" $ }}-headless + replicas: {{ $.Values.shardsvr.dataNode.replicas }} + template: + metadata: + labels: {{- include "common.labels.standard" $ | nindent 8 }} + 
app.kubernetes.io/component: shardsvr + {{- if $.Values.shardsvr.dataNode.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.dataNode.podLabels "context" $ ) | nindent 8 }} + {{- end }} + shard: {{ $i | quote }} + {{- if or $.Values.common.podAnnotations $.Values.shardsvr.dataNode.podAnnotations $.Values.metrics.enabled }} + annotations: + {{- if $.Values.common.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.dataNode.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.metrics.enabled }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- if $.Values.common.schedulerName }} + schedulerName: {{ $.Values.common.schedulerName | quote }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.dataNode.affinity "context" (set $ "dataNodeLoopId" $i)) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" $.Values.shardsvr.dataNode.podAffinityPreset "component" "shardsvr" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" $.Values.shardsvr.dataNode.podAntiAffinityPreset "component" "shardsvr" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" $.Values.shardsvr.dataNode.nodeAffinityPreset.type "key" $.Values.shardsvr.dataNode.nodeAffinityPreset.key "values" $.Values.shardsvr.dataNode.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.dataNode.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.dataNode.nodeSelector "context" (set $ "dataNodeLoopId" $i)) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.dataNode.tolerations "context" (set $ "dataNodeLoopId" $i)) | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "mongodb-sharded.serviceAccountName" (dict "value" $.Values.shardsvr.dataNode.serviceAccount "context" $) }} + {{- if $.Values.shardsvr.dataNode.priorityClassName }} + priorityClassName: {{ $.Values.shardsvr.dataNode.priorityClassName | quote }} + {{- end }} + {{- if $.Values.securityContext.enabled }} + securityContext: + fsGroup: {{ $.Values.securityContext.fsGroup }} + {{- end }} + {{- include "mongodb-sharded.imagePullSecrets" $ | nindent 6 }} + initContainers: + {{- if and $.Values.volumePermissions.enabled $.Values.shardsvr.persistence.enabled }} + - name: volume-permissions + image: {{ include "mongodb-sharded.volumePermissions.image" $ }} + imagePullPolicy: {{ $.Values.volumePermissions.image.pullPolicy | quote }} + command: ["chown", "-R", "{{ $.Values.securityContext.runAsUser }}:{{ $.Values.securityContext.fsGroup }}", "{{ $.Values.shardsvr.persistence.mountPath }}"] + securityContext: + runAsUser: 0 + resources: {{ toYaml $.Values.volumePermissions.resources | nindent 12 }} + 
volumeMounts: + - name: datadir + mountPath: {{ $.Values.shardsvr.persistence.mountPath }} + {{- end }} + {{- with $.Values.shardsvr.dataNode.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + {{- with $.Values.common.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + containers: + - name: mongodb + image: {{ include "mongodb-sharded.image" $ }} + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- if $.Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ $.Values.securityContext.runAsNonRoot }} + runAsUser: {{ $.Values.securityContext.runAsUser }} + {{- end }} + ports: + - containerPort: {{ $.Values.common.containerPorts.mongo }} + name: mongodb + env: + - name: MONGODB_ENABLE_NUMACTL + value: {{ ternary "yes" "no" $.Values.common.mongodbEnableNumactl | quote }} + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or $.Values.image.debug $.Values.diagnosticMode.enabled) | quote }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ $.Values.common.mongodbSystemLogVerbosity | quote }} + - name: MONGODB_MAX_TIMEOUT + value: {{ $.Values.common.mongodbMaxWaitTimeout | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + {{- if $.Values.common.mongodbDisableSystemLog }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_PORT_NUMBER + value: {{ $.Values.common.containerPorts.mongo | quote }} + - name: MONGODB_SHARDING_MODE + value: "shardsvr" + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_MONGOS_HOST + value: {{ include "mongodb-sharded.serviceName" $ }} + - name: MONGODB_MONGOS_PORT_NUMBER + value: {{ $.Values.service.port | quote }} + - name: MONGODB_INITIAL_PRIMARY_HOST + value: {{ printf "%s-shard%d-data-0.%s-headless.%s.svc.%s" (include "common.names.fullname" $ ) $i (include "common.names.fullname" $ ) $.Release.Namespace $.Values.clusterDomain }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ printf "%s-shard-%d" ( include "common.names.fullname" $ ) $i }} + {{- if $.Values.common.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ include "common.names.fullname" $ }}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}" + {{- end }} + {{- if $.Values.usePasswordFile }} + - name: MONGODB_ROOT_PASSWORD_FILE + value: "/bitnami/mongodb/secrets/mongodb-root-password" + - name: MONGODB_REPLICA_SET_KEY_FILE + value: "/bitnami/mongodb/secrets/mongodb-replica-set-key" + {{- else }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" $ }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" $ }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if $.Values.common.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + {{- if $.Values.common.mongodbDirectoryPerDB }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + {{- if $.Values.shardsvr.dataNode.mongodbExtraFlags }} + - name: MONGODB_EXTRA_FLAGS + value: {{ $.Values.shardsvr.dataNode.mongodbExtraFlags | join " " | quote }} + {{- end }} + {{- if $.Values.common.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if 
$.Values.shardsvr.dataNode.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.dataNode.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or $.Values.common.extraEnvVarsCM $.Values.common.extraEnvVarsSecret $.Values.shardsvr.dataNode.extraEnvVarsCM $.Values.shardsvr.dataNode.extraEnvVarsSecret }} + envFrom: + {{- if $.Values.common.extraEnvVarsCM }} + - configMapRef: + name: {{ include "mongodb-sharded.tplValue" ( dict "value" $.Values.common.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if $.Values.common.extraEnvVarsSecret }} + - secretRef: + name: {{ include "mongodb-sharded.tplValue" ( dict "value" $.Values.common.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.extraEnvVarsCM }} + - configMapRef: + name: {{ include "mongodb-sharded.tplValue" ( dict "value" $.Values.shardsvr.dataNode.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.extraEnvVarsSecret }} + - secretRef: + name: {{ include "mongodb-sharded.tplValue" ( dict "value" $.Values.shardsvr.dataNode.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + {{- if $.Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - /entrypoint/replicaset-entrypoint.sh + {{- end }} + {{- if not $.Values.diagnosticMode.enabled }} + {{- if $.Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - pgrep + - mongod + initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ $.Values.livenessProbe.successThreshold }} + failureThreshold: {{ $.Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if $.Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --disableImplicitSessions + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ $.Values.readinessProbe.successThreshold }} + failureThreshold: {{ $.Values.readinessProbe.failureThreshold }} + {{- end }} + {{- end }} + volumeMounts: + - name: replicaset-entrypoint-configmap + mountPath: /entrypoint + - name: datadir + mountPath: {{ $.Values.shardsvr.persistence.mountPath }} + {{- if or $.Values.shardsvr.dataNode.config $.Values.shardsvr.dataNode.configCM }} + - name: config + mountPath: /bitnami/mongodb/conf/ + {{- end }} + {{- if $.Values.usePasswordFile }} + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + {{- if $.Values.common.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/cm + {{- end }} + {{- if $.Values.common.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if $.Values.common.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" 
$.Values.shardsvr.dataNode.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + resources: {{- toYaml $.Values.shardsvr.dataNode.resources | nindent 12 }} + {{- if $.Values.metrics.enabled }} + - name: metrics + image: {{ include "mongodb-sharded.metrics.image" $ }} + imagePullPolicy: {{ $.Values.metrics.image.pullPolicy | quote }} + {{- if $.Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ $.Values.securityContext.runAsNonRoot }} + runAsUser: {{ $.Values.securityContext.runAsUser }} + {{- end }} + env: + {{- if $.Values.usePasswordFile }} + - name: MONGODB_ROOT_PASSWORD_FILE + value: "/bitnami/mongodb/secrets/mongodb-root-password" + {{- else }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" $ }} + key: mongodb-root-password + {{- end }} + {{- if $.Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - sh + - -ec + - |- + #!/bin/sh + {{- if $.Values.usePasswordFile }} + export MONGODB_ROOT_PASSWORD="$(cat "${MONGODB_ROOT_PASSWORD_FILE}")" + {{- end }} + /bin/mongodb_exporter --web.listen-address ":{{ $.Values.metrics.containerPort }}" --mongodb.uri mongodb://root:`echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g"`@localhost:{{ $.Values.service.port }}/admin{{ ternary "?ssl=true" "" $.Values.metrics.useTLS }} {{ $.Values.metrics.extraArgs }} + {{- end }} + {{- if $.Values.usePasswordFile }} + volumeMounts: + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + ports: + - name: metrics + containerPort: {{ $.Values.metrics.containerPort }} + {{- if not $.Values.diagnosticMode.enabled }} + {{- if $.Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ $.Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.metrics.livenessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.metrics.livenessProbe.failureThreshold }} + successThreshold: {{ $.Values.metrics.livenessProbe.successThreshold }} + {{- end }} + {{- if $.Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ $.Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.metrics.readinessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.metrics.readinessProbe.failureThreshold }} + successThreshold: {{ $.Values.metrics.readinessProbe.successThreshold }} + {{- end }} + {{- end }} + resources: {{ toYaml $.Values.metrics.resources | nindent 12 }} + {{- end }} + {{- with $.Values.shardsvr.dataNode.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + {{- with $.Values.common.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" . 
"context" $ ) | nindent 8 }} + {{- end }} + volumes: + - name: replicaset-entrypoint-configmap + configMap: + name: {{ include "common.names.fullname" $ }}-replicaset-entrypoint + {{- if $.Values.usePasswordFile }} + - name: secrets + secret: + secretName: {{ include "mongodb-sharded.secret" $ }} + {{- end }} + {{- if $.Values.common.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ include "mongodb-sharded.initScriptsCM" $ }} + defaultMode: 0755 + {{- end }} + {{- if $.Values.common.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ include "mongodb-sharded.initScriptsSecret" $ }} + defaultMode: 0755 + {{- end }} + {{- if or $.Values.shardsvr.dataNode.config $.Values.shardsvr.dataNode.configCM }} + - name: config + configMap: + name: {{ include "mongodb-sharded.shardsvr.dataNode.configCM" $ }} + {{- end }} + {{- if $.Values.common.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.dataNode.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := $.Values.shardsvr.persistence.annotations }} + {{ $key }}: "{{ $value }}" + {{- end }} + spec: + accessModes: + {{- range $.Values.shardsvr.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ $.Values.shardsvr.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" $.Values.shardsvr.persistence "global" $.Values.global) | nindent 8 }} + {{- else }} + - name: datadir + emptyDir: {} + {{- end }} +{{- if lt $i (sub $replicas 1) }} +--- +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-dsk/values.yaml b/ansible/roles/helm_install/files/mongo-dsk/values.yaml new file mode 100644 index 0000000..caf1b40 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-dsk/values.yaml @@ -0,0 +1,1217 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global storage class for dynamic provisioning +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters +## + +## @param nameOverride String to partially override mongodb.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override mongodb.fullname template +## +fullnameOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## ref: https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#introduction +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section MongoDB(®) Sharded parameters +## + +## Bitnami MongoDB(®) Sharded image version +## ref: https://hub.docker.com/r/bitnami/mongodb-sharded/tags/ +## @param image.registry MongoDB(®) Sharded image registry +## @param image.repository MongoDB(®) Sharded Image name +## @param image.tag MongoDB(®) Sharded image tag (immutable tags are recommended) +## @param image.pullPolicy MongoDB(®) Sharded image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug logs should be enabled +## +image: + registry: docker.io + repository: bitnami/mongodb-sharded + tag: 4.4.13-debian-10-r5 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
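+  ## As a sketch, a matching pull secret could be created ahead of time with kubectl
+  ## (the secret name below is the same illustrative one used in the example above):
+  ##   kubectl create secret docker-registry myRegistryKeySecretName \
+  ##     --docker-server=<registry> --docker-username=<user> --docker-password=<password>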
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## MongoDB® credentials +## @param mongodbRootPassword MongoDB® root password +## If set to null it will be randomly generated +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## e.g: +## mongodbRootPassword: password +## +mongodbRootPassword: "" +## @param replicaSetKey Replica Set key (shared for shards and config servers) +## e.g: +## replicaSetKey: testkey123 +## +replicaSetKey: "" +## @param existingSecret Existing secret with MongoDB® credentials +## e.g: +## existingSecret: name-of-existing-secret +## +existingSecret: "" +## @param usePasswordFile Mount credentials as files instead of using environment variables +## +usePasswordFile: false +## @param shards Number of shards to be created +## ref: https://docs.mongodb.com/manual/core/sharded-cluster-shards/ +## +shards: 2 +## Properties for all of the pods in the cluster (shards, config servers and mongos) +## +common: + ## @param common.mongodbEnableNumactl Enable launch MongoDB instance prefixed with "numactl --interleave=all" + ## ref: https://docs.mongodb.com/manual/administration/production-notes/#mongodb-and-numa-hardware + ## + mongodbEnableNumactl: false + ## @param common.useHostnames Enable DNS hostnames in the replica set config + ## + useHostnames: true + ## @param common.mongodbEnableIPv6 Switch to enable/disable IPv6 on MongoDB® + ## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 + ## + mongodbEnableIPv6: false + ## @param common.mongodbDirectoryPerDB Switch to enable/disable DirectoryPerDB on MongoDB® + ## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-directoryperdb + ## + mongodbDirectoryPerDB: false + ## @param common.mongodbSystemLogVerbosity MongoDB® system log verbosity level + ## ref: https://docs.mongodb.com/manual/reference/program/mongo/#cmdoption-mongo-ipv6 + ## + mongodbSystemLogVerbosity: 0 + ## @param common.mongodbDisableSystemLog Whether to disable MongoDB® system log or not + ## ref: https://github.com/bitnami/bitnami-docker-mongodb#configuring-system-log-verbosity-level + ## + mongodbDisableSystemLog: false + ## @param common.mongodbMaxWaitTimeout Maximum time (in seconds) for MongoDB® nodes to wait for another MongoDB® node to be ready + ## + mongodbMaxWaitTimeout: 120 + ## @param common.initScriptsCM Configmap with init scripts to execute + ## + initScriptsCM: "" + ## @param common.initScriptsSecret Secret with init scripts to execute (for sensitive data) + ## + initScriptsSecret: "" + ## @param common.extraEnvVars An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: KIBANA_ELASTICSEARCH_URL + ## value: test + ## + extraEnvVars: [] + ## @param common.extraEnvVarsCM Name of a ConfigMap containing extra env vars + ## + extraEnvVarsCM: "" + ## @param common.extraEnvVarsSecret Name of a Secret containing extra env vars + ## + extraEnvVarsSecret: "" + ## @param common.sidecars Add sidecars to the pod + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param common.initContainers Add init containers 
to the pod + ## For example: + ## initcontainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## + initContainers: [] + ## @param common.podAnnotations Additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param common.podLabels Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param common.extraVolumes Array to add extra volumes + ## + extraVolumes: [] + ## @param common.extraVolumeMounts Array to add extra mounts (normally used with extraVolumes) + ## + extraVolumeMounts: [] + ## @param common.containerPorts.mongo MongoDB container port + ## + containerPorts: + mongo: 27017 + ## K8s Service Account. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param common.serviceAccount.create Whether to create a Service Account for all pods automatically + ## + create: false + ## @param common.serviceAccount.name Name of a Service Account to be used by all Pods + ## If not set and create is true, a name is generated using the XXX.fullname template + ## + name: "" + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image name + ## @param volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r358 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param volumePermissions.resources Init container resource requests/limit + ## + resources: {} +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## @param securityContext.enabled Enable security context +## @param securityContext.fsGroup Group ID for the container +## @param securityContext.runAsUser User ID for the container +## @param securityContext.runAsNonRoot Run containers as non-root users +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + runAsNonRoot: true +## Kubernetes service type +## ref: https://kubernetes.io/docs/concepts/services-networking/service/ +## +service: + ## @param service.name Specify an explicit service name + ## + name: "" + ## @param service.annotations Additional service annotations (evaluate as a template) + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: {} + ## @param service.type Service type + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + ## + type: ClusterIP + ## @param service.externalTrafficPolicy External traffic policy + ## Enable client source IP preservation + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + ## + externalTrafficPolicy: Cluster + ## @param service.port MongoDB® service port + ## + port: 27017 + ## @param service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#servicespec-v1-core + ## + clusterIP: "" + ## @param service.nodePort Specify the nodePort value for the LoadBalancer and NodePort service types. 
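+  ## e.g. (illustrative value only; it must fall within the cluster's NodePort range,
+  ## 30000-32767 by default):
+  ## nodePort: "30017"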
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param service.externalIPs External IP list to use with ClusterIP service type + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + ## @param service.loadBalancerIP Static IP Address to use for LoadBalancer service type + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges List of IP ranges allowed access to load balancer (if supported) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + loadBalancerSourceRanges: [] + ## @param service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same mongos Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None +## Configure extra options for liveness probes +## This applies to all the MongoDB® in the sharded cluster +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +## Configure extra options for readiness probe +## This applies to all the MongoDB® in the sharded cluster +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## @section Config Server parameters +## + +## Config Server replica set properties +## ref: https://docs.mongodb.com/manual/core/sharded-cluster-config-servers/ +## +configsvr: + ## @param configsvr.replicas Number of nodes in the replica set (the first node will be primary) + ## + replicas: 1 + ## @param configsvr.resources Configure pod resources + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + ## @param configsvr.hostAliases Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param 
configsvr.mongodbExtraFlags MongoDB® additional command line flags + ## Can be used to specify command line flags, for example: + ## mongodbExtraFlags: + ## - "--wiredTigerCacheSizeGB=2" + ## + mongodbExtraFlags: [] + ## @param configsvr.priorityClassName Pod priority class name + ## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param configsvr.podAffinityPreset Config Server Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param configsvr.podAntiAffinityPreset Config Server Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param configsvr.nodeAffinityPreset.type Config Server Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param configsvr.nodeAffinityPreset.key Config Server Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param configsvr.nodeAffinityPreset.values Config Server Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param configsvr.affinity Config Server Affinity for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: configsvr.podAffinityPreset, configsvr.podAntiAffinityPreset, and configsvr.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param configsvr.nodeSelector Config Server Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param configsvr.tolerations Config Server Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param configsvr.podManagementPolicy Statefulset's pod management policy, allows parallel startup of pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: OrderedReady + ## @param configsvr.updateStrategy.type updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## @param configsvr.config MongoDB® configuration file + ## ref: http://docs.mongodb.org/manual/reference/configuration-options/ + ## + config: "" + ## @param configsvr.configCM ConfigMap name with Config Server configuration file (cannot be used with configsvr.config) + ## ref: http://docs.mongodb.org/manual/reference/configuration-options/ + ## + configCM: "" + ## @param configsvr.extraEnvVars An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: KIBANA_ELASTICSEARCH_URL + ## value: test + ## + extraEnvVars: [] + ## @param configsvr.extraEnvVarsCM Name of a ConfigMap containing extra env vars + ## + extraEnvVarsCM: "" + ## @param 
configsvr.extraEnvVarsSecret Name of a Secret containing extra env vars + ## + extraEnvVarsSecret: "" + ## @param configsvr.sidecars Add sidecars to the pod + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param configsvr.initContainers Add init containers to the pod + ## For example: + ## initcontainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## + initContainers: [] + ## @param configsvr.podAnnotations Additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param configsvr.podLabels Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param configsvr.extraVolumes Array to add extra volumes. Requires setting `extraVolumeMounts` + ## + extraVolumes: [] + ## @param configsvr.extraVolumeMounts Array to add extra mounts (normally used with extraVolumes). Normally used with `extraVolumes` + ## + extraVolumeMounts: [] + ## @param configsvr.schedulerName Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## Pod disruption budget + ## + pdb: + ## @param configsvr.pdb.enabled Enable pod disruption budget + ## + enabled: false + ## @param configsvr.pdb.minAvailable Minimum number of available config pods allowed (`0` to disable) + ## + minAvailable: 0 + ## @param configsvr.pdb.maxUnavailable Maximum number of unavailable config pods allowed (`0` to disable) + ## + maxUnavailable: 1 + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param configsvr.persistence.enabled Use a PVC to persist data + ## + enabled: true + ## @param configsvr.persistence.mountPath Path to mount the volume at + ## MongoDB® images. + ## + mountPath: /bitnami/mongodb + ## @param configsvr.persistence.subPath Subdirectory of the volume to mount at + ## Useful in dev environments and one PV for multiple services. + ## + subPath: "" + ## @param configsvr.persistence.storageClass Storage class of backing PVC + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param configsvr.persistence.accessModes Use volume as ReadOnly or ReadWrite + ## + accessModes: + - ReadWriteOnce + ## @param configsvr.persistence.size PersistentVolumeClaim size + ## + size: 8Gi + ## @param configsvr.persistence.annotations Persistent Volume annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: {} + ## K8s Service Account. 
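+  ## As a sketch, reusing a pre-created account could look like this (the account
+  ## name below is illustrative):
+  ## serviceAccount:
+  ##   create: false
+  ##   name: "my-configsvr-sa"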
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param configsvr.serviceAccount.create Specifies whether a ServiceAccount should be created for Config Server + ## + create: false + ## @param configsvr.serviceAccount.name Name of a Service Account to be used by Config Server + ## If not set and create is true, a name is generated using the XXX.fullname template + ## + name: "" + ## Use a external config server instead of deploying one + ## + external: + ## @param configsvr.external.host Primary node of an external Config Server replicaset + ## + host: "" + ## @param configsvr.external.rootPassword Root password of the external Config Server replicaset + ## + rootPassword: "" + ## @param configsvr.external.replicasetName Replicaset name of an external Config Server + ## + replicasetName: "" + ## @param configsvr.external.replicasetKey Replicaset key of an external Config Server + ## + replicasetKey: "" + +## @section Mongos parameters +## + +## Mongos properties +## ref: https://docs.mongodb.com/manual/reference/program/mongos/#bin.mongos +## +mongos: + ## @param mongos.replicas Number of replicas + ## + replicas: 1 + ## @param mongos.resources Configure pod resources + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + ## @param mongos.hostAliases Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param mongos.mongodbExtraFlags MongoDB® additional command line flags + ## Can be used to specify command line flags, for example: + ## mongodbExtraFlags: + ## - "--wiredTigerCacheSizeGB=2" + ## + mongodbExtraFlags: [] + ## @param mongos.priorityClassName Pod priority class name + ## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param mongos.podAffinityPreset Mongos Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param mongos.podAntiAffinityPreset Mongos Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param mongos.nodeAffinityPreset.type Mongos Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param mongos.nodeAffinityPreset.key Mongos Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param mongos.nodeAffinityPreset.values Mongos Node label values to match. Ignored if `affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param mongos.affinity Mongos Affinity for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: mongos.podAffinityPreset, mongos.podAntiAffinityPreset, and mongos.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param mongos.nodeSelector Mongos Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param mongos.tolerations Mongos Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param mongos.podManagementPolicy Statefulsets pod management policy, allows parallel startup of pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: OrderedReady + ## @param mongos.updateStrategy.type updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## @param mongos.config MongoDB® configuration file + ## ref: http://docs.mongodb.org/manual/reference/configuration-options/ + ## + config: "" + ## @param mongos.configCM ConfigMap name with MongoDB® configuration file (cannot be used with mongos.config) + ## ref: http://docs.mongodb.org/manual/reference/configuration-options/ + ## + configCM: "" + ## @param mongos.extraEnvVars An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: KIBANA_ELASTICSEARCH_URL + ## value: test + ## + extraEnvVars: [] + ## @param mongos.extraEnvVarsCM Name of a ConfigMap containing extra env vars + ## + extraEnvVarsCM: "" + ## @param mongos.extraEnvVarsSecret Name of a Secret containing extra env vars + ## + extraEnvVarsSecret: "" + ## @param mongos.sidecars Add sidecars to the pod + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param mongos.initContainers Add init containers to the pod + ## For example: + ## initcontainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## + initContainers: [] + ## @param mongos.podAnnotations Additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param mongos.podLabels Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param mongos.extraVolumes Array to add extra volumes. Requires setting `extraVolumeMounts` + ## + extraVolumes: [] + ## @param mongos.extraVolumeMounts Array to add extra volume mounts. Normally used with `extraVolumes`. + ## + extraVolumeMounts: [] + ## @param mongos.schedulerName Use an alternate scheduler, e.g. "stork". 
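+  ## e.g.:
+  ## schedulerName: "stork"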
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param mongos.useStatefulSet Use StatefulSet instead of Deployment + ## + useStatefulSet: false + ## When using a statefulset, you can enable one service per replica + ## This is useful when exposing the mongos through load balancers to make sure clients + ## connect to the same mongos and therefore can follow their cursors + ## + servicePerReplica: + ## @param mongos.servicePerReplica.enabled Create one service per mongos replica (must be used with statefulset) + ## + enabled: false + ## @param mongos.servicePerReplica.annotations Additional service annotations (evaluate as a template) + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: {} + ## @param mongos.servicePerReplica.type Service type + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + ## + type: ClusterIP + ## @param mongos.servicePerReplica.externalTrafficPolicy External traffic policy + ## Enable client source IP preservation + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + ## + externalTrafficPolicy: Cluster + ## @param mongos.servicePerReplica.port MongoDB® service port + ## + port: 27017 + ## @param mongos.servicePerReplica.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#servicespec-v1-core + ## + clusterIP: "" + ## @param mongos.servicePerReplica.nodePort Specify the nodePort value for the LoadBalancer and NodePort service types + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param mongos.servicePerReplica.externalIPs External IP list to use with ClusterIP service type + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + ## @param mongos.servicePerReplica.loadBalancerIP Static IP Address to use for LoadBalancer service type + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + ## + loadBalancerIP: "" + ## @param mongos.servicePerReplica.loadBalancerSourceRanges List of IP ranges allowed access to load balancer (if supported) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + loadBalancerSourceRanges: [] + ## @param mongos.servicePerReplica.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param mongos.servicePerReplica.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same mongos Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## Pod disruption budget + ## + pdb: + ## @param mongos.pdb.enabled Enable pod disruption budget + ## + enabled: false + ## @param mongos.pdb.minAvailable Minimum number of available mongo pods allowed (`0` to disable) + ## + minAvailable: 0 + ## @param mongos.pdb.maxUnavailable Maximum number of unavailable mongo pods allowed (`0` to disable) + ## + maxUnavailable: 1 + ## K8s Service Account. 
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+  ##
+  serviceAccount:
+    ## @param mongos.serviceAccount.create Whether to create a Service Account for mongos automatically
+    ##
+    create: false
+    ## @param mongos.serviceAccount.name Name of a Service Account to be used by mongos
+    ## If not set and create is true, a name is generated using the XXX.fullname template
+    ##
+    name: ""
+
+## @section Shard configuration: Data node parameters
+##
+
+## Shard replica set properties
+## ref: https://docs.mongodb.com/manual/replication/index.html
+##
+shardsvr:
+  ## Properties for data nodes (primary and secondary)
+  ##
+  dataNode:
+    ## @param shardsvr.dataNode.replicas Number of nodes in each shard replica set (the first node will be primary)
+    ##
+    replicas: 1
+    ## @param shardsvr.dataNode.resources Configure pod resources
+    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+    ##
+    resources: {}
+    ## @param shardsvr.dataNode.mongodbExtraFlags MongoDB® additional command line flags
+    ## Can be used to specify command line flags, for example:
+    ## mongodbExtraFlags:
+    ##  - "--wiredTigerCacheSizeGB=2"
+    ##
+    mongodbExtraFlags: []
+    ## @param shardsvr.dataNode.priorityClassName Pod priority class name
+    ## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+    ##
+    priorityClassName: ""
+    ## @param shardsvr.dataNode.podAffinityPreset Data nodes Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+    ##
+    podAffinityPreset: ""
+    ## @param shardsvr.dataNode.podAntiAffinityPreset Data nodes Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+    ##
+    podAntiAffinityPreset: soft
+    ## Node affinity preset
+    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+    ##
+    nodeAffinityPreset:
+      ## @param shardsvr.dataNode.nodeAffinityPreset.type Data nodes Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+      ##
+      type: ""
+      ## @param shardsvr.dataNode.nodeAffinityPreset.key Data nodes Node label key to match. Ignored if `affinity` is set.
+      ## E.g.
+      ## key: "kubernetes.io/e2e-az-name"
+      ##
+      key: ""
+      ## @param shardsvr.dataNode.nodeAffinityPreset.values Data nodes Node label values to match. Ignored if `affinity` is set.
+      ## E.g.
+      ## values:
+      ##   - e2e-az1
+      ##   - e2e-az2
+      ##
+      values: []
+    ## @param shardsvr.dataNode.affinity Data nodes Affinity for pod assignment
+    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+    ## You can set dataNodeLoopId (or any other parameter) by setting the below code block under this 'affinity' section:
+    ## affinity:
+    ##   matchLabels:
+    ##     shard: "{{ .dataNodeLoopId }}"
+    ##
+    ## Note: shardsvr.dataNode.podAffinityPreset, shardsvr.dataNode.podAntiAffinityPreset, and shardsvr.dataNode.nodeAffinityPreset will be ignored when it's set
+    ##
+    affinity: {}
+    ## @param shardsvr.dataNode.nodeSelector Data nodes Node labels for pod assignment
+    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+    ## You can set dataNodeLoopId (or any other parameter) by setting the below code block under this 'nodeSelector' section:
+    ## nodeSelector: { shardId: "{{ .dataNodeLoopId }}" }
+    ##
+    nodeSelector: {}
+    ## @param shardsvr.dataNode.tolerations Data nodes Tolerations for pod assignment
+    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+    ## You can set dataNodeLoopId (or any other parameter) by setting the below code block under this 'tolerations' section:
+    ## tolerations:
+    ##   - key: "shardId"
+    ##     operator: "Equal"
+    ##     value: "{{ .dataNodeLoopId }}"
+    ##     effect: "NoSchedule"
+    ##
+    tolerations: []
+    ## @param shardsvr.dataNode.podManagementPolicy podManagementPolicy for the statefulset, allows parallel startup of pods
+    ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
+    ##
+    podManagementPolicy: OrderedReady
+    ## @param shardsvr.dataNode.updateStrategy.type updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets
+    ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+    ##
+    updateStrategy:
+      type: RollingUpdate
+    ## @param shardsvr.dataNode.hostAliases Deployment pod host aliases
+    ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+    ##
+    hostAliases: []
+    ## @param shardsvr.dataNode.config Entries for the MongoDB® config file
+    ## ref: http://docs.mongodb.org/manual/reference/configuration-options/
+    ##
+    config: ""
+    ## @param shardsvr.dataNode.configCM ConfigMap name with MongoDB® configuration (cannot be used with shardsvr.dataNode.config)
+    ## ref: http://docs.mongodb.org/manual/reference/configuration-options/
+    ##
+    configCM: ""
+    ## @param shardsvr.dataNode.extraEnvVars An array to add extra env vars
+    ## For example:
+    ## extraEnvVars:
+    ##  - name: KIBANA_ELASTICSEARCH_URL
+    ##    value: test
+    ##
+    extraEnvVars: []
+    ## @param shardsvr.dataNode.extraEnvVarsCM Name of a ConfigMap containing extra env vars
+    ##
+    extraEnvVarsCM: ""
+    ## @param shardsvr.dataNode.extraEnvVarsSecret Name of a Secret containing extra env vars
+    ##
+    extraEnvVarsSecret: ""
+    ## @param shardsvr.dataNode.sidecars Attach additional containers (evaluated as a template)
+    ## For example:
+    ## sidecars:
+    ##   - name: your-image-name
+    ##     image: your-image
+    ##     imagePullPolicy: Always
+    ##     ports:
+    ##       - name: portname
+    ##         containerPort: 1234
+    ##
+    sidecars: []
+    ## @param shardsvr.dataNode.initContainers Add init containers to the pod
+    ## For example:
+    ## initContainers:
+    ##   - name: your-image-name
+    ##     image: your-image
+    ##     imagePullPolicy: Always
+    ##
+    initContainers: []
+    ## @param shardsvr.dataNode.podAnnotations Additional pod annotations
+    ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+    ##
+    podAnnotations: {}
+    ## @param shardsvr.dataNode.podLabels Additional pod labels
+    ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+    ##
+    podLabels: {}
+    ## @param shardsvr.dataNode.extraVolumes Array to add extra volumes. Requires setting `extraVolumeMounts`
+    ##
+    extraVolumes: []
+    ## @param shardsvr.dataNode.extraVolumeMounts Array to add extra mounts. Normally used with `extraVolumes`
+    ##
+    extraVolumeMounts: []
+    ## @param shardsvr.dataNode.schedulerName Use an alternate scheduler, e.g. "stork".
+    ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+    ##
+    schedulerName: ""
+    ## Pod disruption budget
+    ##
+    pdb:
+      ## @param shardsvr.dataNode.pdb.enabled Enable pod disruption budget
+      ##
+      enabled: false
+      ## @param shardsvr.dataNode.pdb.minAvailable Minimum number of available data pods allowed (`0` to disable)
+      ##
+      minAvailable: 0
+      ## @param shardsvr.dataNode.pdb.maxUnavailable Maximum number of unavailable data pods allowed (`0` to disable)
+      ##
+      maxUnavailable: 1
+    ## K8s Service Account.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+    ##
+    serviceAccount:
+      ## @param shardsvr.dataNode.serviceAccount.create Specifies whether a ServiceAccount should be created for shardsvr
+      ##
+      create: false
+      ## @param shardsvr.dataNode.serviceAccount.name Name of a Service Account to be used by shardsvr data pods
+      ## If not set and create is true, a name is generated using the XXX.fullname template
+      ##
+      name: ""
+
+  ## @section Shard configuration: Persistence parameters
+  ##
+
+  ## Enable persistence using Persistent Volume Claims
+  ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## @param shardsvr.persistence.enabled Use a PVC to persist data
+    ##
+    enabled: true
+    ## @param shardsvr.persistence.mountPath The path the volume will be mounted at, useful when using different MongoDB® images.
+    ##
+    mountPath: /bitnami/mongodb
+    ## @param shardsvr.persistence.subPath Subdirectory of the volume to mount at
+    ## Useful in development environments and one PV for multiple services.
+    ##
+    subPath: ""
+    ## @param shardsvr.persistence.storageClass Storage class of backing PVC
+    ##   If defined, storageClassName: <storageClass>
+    ##   If set to "-", storageClassName: "", which disables dynamic provisioning
+    ##   If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    storageClass: ""
+    ## @param shardsvr.persistence.accessModes Use volume as ReadOnly or ReadWrite
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## @param shardsvr.persistence.size PersistentVolumeClaim size
+    ##
+    size: 8Gi
+    ## @param shardsvr.persistence.annotations Additional volume annotations
+    ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+    ##
+    annotations: {}
+
+  ## @section Shard configuration: Arbiter parameters
+  ##
+
+  ## Properties for arbiter nodes
+  ## ref: https://docs.mongodb.com/manual/tutorial/add-replica-set-arbiter/
+  ##
+  arbiter:
+    ## @param shardsvr.arbiter.replicas Number of arbiters in each shard replica set
+    ##
+    replicas: 0
+    ## @param shardsvr.arbiter.hostAliases Deployment pod host aliases
+    ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+    ##
+    hostAliases: []
+    ## @param shardsvr.arbiter.resources Configure pod resources
+    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+    ##
+    resources: {}
+    ## @param shardsvr.arbiter.mongodbExtraFlags MongoDB® additional command line flags
+    ## Can be used to specify command line flags, for example:
+    ## mongodbExtraFlags:
+    ##  - "--wiredTigerCacheSizeGB=2"
+    ##
+    mongodbExtraFlags: []
+    ## @param shardsvr.arbiter.priorityClassName Pod priority class name
+    ## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+    ##
+    priorityClassName: ""
+    ## @param shardsvr.arbiter.podAffinityPreset Arbiter's Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+    ##
+    podAffinityPreset: ""
+    ## @param shardsvr.arbiter.podAntiAffinityPreset Arbiter's Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+    ##
+    podAntiAffinityPreset: soft
+    ## Node affinity preset
+    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+    ##
+    nodeAffinityPreset:
+      ## @param shardsvr.arbiter.nodeAffinityPreset.type Arbiter's Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+      ##
+      type: ""
+      ## @param shardsvr.arbiter.nodeAffinityPreset.key Arbiter's Node label key to match. Ignored if `affinity` is set.
+      ## E.g.
+      ## key: "kubernetes.io/e2e-az-name"
+      ##
+      key: ""
+      ## @param shardsvr.arbiter.nodeAffinityPreset.values Arbiter's Node label values to match. Ignored if `affinity` is set.
+      ## E.g.
+      ## values:
+      ##   - e2e-az1
+      ##   - e2e-az2
+      ##
+      values: []
+    ## @param shardsvr.arbiter.affinity Arbiter's Affinity for pod assignment
+    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+    ## You can set arbiterLoopId (or any other parameter) by setting the below code block under this 'affinity' section:
+    ## affinity:
+    ##   matchLabels:
+    ##     shard: "{{ .arbiterLoopId }}"
+    ##
+    ## Note: shardsvr.arbiter.podAffinityPreset, shardsvr.arbiter.podAntiAffinityPreset, and shardsvr.arbiter.nodeAffinityPreset will be ignored when it's set
+    ##
+    affinity: {}
+    ## @param shardsvr.arbiter.nodeSelector Arbiter's Node labels for pod assignment
+    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+    ##
+    nodeSelector: {}
+    ## @param shardsvr.arbiter.tolerations Arbiter's Tolerations for pod assignment
+    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+    ##
+    tolerations: []
+    ## @param shardsvr.arbiter.podManagementPolicy Statefulset's pod management policy, allows parallel startup of pods
+    ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
+    ##
+    podManagementPolicy: OrderedReady
+    ## @param shardsvr.arbiter.updateStrategy.type updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets
+    ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+    ##
+    updateStrategy:
+      type: RollingUpdate
+    ## @param shardsvr.arbiter.config MongoDB® configuration file
+    ## ref: http://docs.mongodb.org/manual/reference/configuration-options/
+    ##
+    config: ""
+    ## @param shardsvr.arbiter.configCM ConfigMap name with MongoDB® configuration file (cannot be used with shardsvr.arbiter.config)
+    ## ref: http://docs.mongodb.org/manual/reference/configuration-options/
+    ##
+    configCM: ""
+    ## @param shardsvr.arbiter.extraEnvVars An array to add extra env vars
+    ## For example:
+    ## extraEnvVars:
+    ##  - name: KIBANA_ELASTICSEARCH_URL
+    ##    value: test
+    ##
+    extraEnvVars: []
+    ## @param shardsvr.arbiter.extraEnvVarsCM Name of a ConfigMap containing extra env vars
+    ##
+    extraEnvVarsCM: ""
+    ## @param shardsvr.arbiter.extraEnvVarsSecret Name of a Secret containing extra env vars
+    ##
+    extraEnvVarsSecret: ""
+    ## @param shardsvr.arbiter.sidecars Add sidecars to the pod
+    ## For example:
+    ## sidecars:
+    ##   - name: your-image-name
+    ##     image: your-image
+    ##     imagePullPolicy: Always
+    ##     ports:
+    ##       - name: portname
+    ##         containerPort: 1234
+    ##
+    sidecars: []
+    ## @param shardsvr.arbiter.initContainers Add init containers to the pod
+    ## For example:
+    ## initContainers:
+    ##   - name: your-image-name
+    ##     image: your-image
+    ##     imagePullPolicy: Always
+    ##
+    initContainers: []
+    ## @param shardsvr.arbiter.podAnnotations Additional pod annotations
+    ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+    ##
+    podAnnotations: {}
+    ## @param shardsvr.arbiter.podLabels Additional pod labels
+    ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+    ##
+    podLabels: {}
+    ## @param shardsvr.arbiter.extraVolumes Array to add extra volumes
+    ##
+    extraVolumes: []
+    ## @param shardsvr.arbiter.extraVolumeMounts Array to add extra mounts (normally used with extraVolumes)
+    ##
+    extraVolumeMounts: []
+    ## @param shardsvr.arbiter.schedulerName Use an alternate scheduler, e.g. "stork".
+    ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+    ##
+    schedulerName: ""
+    ## K8s Service Account.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+    ##
+    serviceAccount:
+      ## @param shardsvr.arbiter.serviceAccount.create Specifies whether a ServiceAccount should be created for shardsvr arbiter nodes
+      ##
+      create: false
+      ## @param shardsvr.arbiter.serviceAccount.name Name of a Service Account to be used by shardsvr arbiter pods
+      ## If not set and create is true, a name is generated using the XXX.fullname template
+      ##
+      name: ""
+
+## @section Metrics parameters
+##
+
+metrics:
+  ## @param metrics.enabled Start a side-car prometheus exporter
+  ##
+  enabled: false
+  ## @param metrics.image.registry MongoDB® exporter image registry
+  ## @param metrics.image.repository MongoDB® exporter image name
+  ## @param metrics.image.tag MongoDB® exporter image tag
+  ## @param metrics.image.pullPolicy MongoDB® exporter image pull policy
+  ## @param metrics.image.pullSecrets MongoDB® exporter image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/mongodb-exporter
+    tag: 0.30.0-debian-10-r91
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## e.g:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## @param metrics.useTLS Whether to connect to MongoDB® with TLS
+  useTLS: false
+  ## @param metrics.extraArgs String with extra arguments to the metrics exporter
+  ## ref: https://github.com/dcu/mongodb_exporter/blob/master/mongodb_exporter.go
+  ##
+  extraArgs: ""
+  ## @param metrics.resources Metrics exporter resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+  ## Metrics exporter liveness probe
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+  ## @param metrics.livenessProbe.enabled Enable livenessProbe
+  ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+  ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe
+  ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+  ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe
+  ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe
+  ##
+  livenessProbe:
+    enabled: false
+    initialDelaySeconds: 15
+    periodSeconds: 5
+    timeoutSeconds: 5
+    failureThreshold: 3
+    successThreshold: 1
+  ## Metrics exporter readiness probe
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+  ## @param metrics.readinessProbe.enabled Enable readinessProbe
+  ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+  ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe
+  ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+  ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe
+  ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe
+  ##
+  readinessProbe:
+    enabled: false
+    initialDelaySeconds: 5
+    periodSeconds: 5
+    timeoutSeconds: 1
+    failureThreshold: 3
+    successThreshold: 1
+  ## @param metrics.containerPort Port of the Prometheus metrics container
+  ##
+  containerPort: 9216
+  ## @param metrics.podAnnotations [object] Metrics exporter pod Annotation
+  ##
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "{{ .Values.metrics.containerPort }}"
+  ## Prometheus Service Monitor
+  ## ref: https://github.com/coreos/prometheus-operator
+  ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+  ##
+  podMonitor:
+    ## @param metrics.podMonitor.enabled Create PodMonitor Resource for scraping metrics using PrometheusOperator
+    ##
+    enabled: false
+    ## @param metrics.podMonitor.namespace Namespace where podmonitor resource should be created
+    ##
+    namespace: monitoring
+    ## @param metrics.podMonitor.interval Specify the interval at which metrics should be scraped
+    ##
+    interval: 30s
+    ## @param metrics.podMonitor.scrapeTimeout Specify the timeout after which the scrape is ended
+    ## e.g:
+    ## scrapeTimeout: 30s
+    ##
+    scrapeTimeout: ""
+    ## @param metrics.podMonitor.additionalLabels Additional labels that can be used so PodMonitors will be discovered by Prometheus
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+    ##
+    additionalLabels: {}
diff --git a/ansible/roles/helm_install/files/mongo-manifest/.helmignore b/ansible/roles/helm_install/files/mongo-manifest/.helmignore
new file mode 100644
index 0000000..f0c1319
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-manifest/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/ansible/roles/helm_install/files/mongo-manifest/Chart.lock b/ansible/roles/helm_install/files/mongo-manifest/Chart.lock
new file mode 100644
index 0000000..b8814b6
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-manifest/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: common
+  repository: https://charts.bitnami.com/bitnami
+  version: 1.11.3
+digest: sha256:d5f850d857edd58b32c0e10652f6ec3ce5018def5542f2bcef38fd7fa0079d6b
+generated: "2022-03-07T11:59:31.665943918Z"
diff --git a/ansible/roles/helm_install/files/mongo-manifest/Chart.yaml b/ansible/roles/helm_install/files/mongo-manifest/Chart.yaml
new file mode 100644
index 0000000..8bf0c80
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-manifest/Chart.yaml
@@ -0,0 +1,30 @@
+annotations:
+  category: Database
+apiVersion: v2
+appVersion: 4.4.13
+dependencies:
+- name: common
+  repository: https://charts.bitnami.com/bitnami
+  tags:
+  - bitnami-common
+  version: 1.x.x
+description: MongoDB(R) is an open source NoSQL database that uses JSON for data storage.
+  MongoDB(TM) Sharded improves scalability and reliability for large datasets by distributing
+  data across multiple machines.
+home: https://github.com/bitnami/charts/tree/master/bitnami/mongodb-sharded +icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png +keywords: +- mongodb +- database +- nosql +- cluster +- replicaset +- replication +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: mongodb-sharded +sources: +- https://github.com/bitnami/bitnami-docker-mongodb-sharded +- https://mongodb.org +version: 4.0.10 diff --git a/ansible/roles/helm_install/files/mongo-manifest/README.md b/ansible/roles/helm_install/files/mongo-manifest/README.md new file mode 100644 index 0000000..37dbc29 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/README.md @@ -0,0 +1,548 @@ + + +# MongoDB(R) Sharded packaged by Bitnami + +MongoDB(R) is an open source NoSQL database that uses JSON for data storage. MongoDB(TM) Sharded improves scalability and reliability for large datasets by distributing data across multiple machines. + +[Overview of MongoDB® Sharded](http://www.mongodb.org) + +Disclaimer: The respective trademarks mentioned in the offering are owned by the respective companies. We do not provide a commercial license for any of these products. This listing has an open-source license. MongoDB(R) is run and maintained by MongoDB, which is a completely separate project from Bitnami. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/mongodb-sharded +``` + +## Introduction + +This chart bootstraps a [MongoDB(®) Sharded](https://github.com/bitnami/bitnami-docker-mongodb-sharded) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Classified as a NoSQL database, MongoDB® eschews the traditional table-based relational database structure in favor of JSON-like documents with dynamic schemas, making the integration of data in certain types of applications easier and faster. + +This chart uses the [sharding method](https://docs.mongodb.com/manual/sharding/) for distributing data across multiple machines. This is meant for deployments with very large data sets and high throughput operations. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure +- ReadWriteMany volumes for deployment scaling + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/mongodb-sharded +``` + +The command deploys MongoDB® on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
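+
+Note that, as is usual for charts based on StatefulSets, `helm delete` does not remove the PersistentVolumeClaims created from `volumeClaimTemplates`. A minimal cleanup sketch is shown below; it assumes the PVCs carry the standard `app.kubernetes.io/instance` label, so verify the labels on your PVCs before deleting anything:
+
+```bash
+# List the PVCs that belong to the release (the label selector is an assumption; check with `kubectl get pvc --show-labels`)
+$ kubectl get pvc -l app.kubernetes.io/instance=my-release
+# Delete them only once you are sure the data is no longer needed
+$ kubectl delete pvc -l app.kubernetes.io/instance=my-release
+```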
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global storage class for dynamic provisioning | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `nameOverride` | String to partially override mongodb.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override mongodb.fullname template | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + + +### MongoDB(®) Sharded parameters + +| Name | Description | Value | +| ------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `image.registry` | MongoDB(®) Sharded image registry | `docker.io` | +| `image.repository` | MongoDB(®) Sharded Image name | `bitnami/mongodb-sharded` | +| `image.tag` | MongoDB(®) Sharded image tag (immutable tags are recommended) | `4.4.11-debian-10-r6` | +| `image.pullPolicy` | MongoDB(®) Sharded image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `mongodbRootPassword` | MongoDB® root password | `""` | +| `replicaSetKey` | Replica Set key (shared for shards and config servers) | `""` | +| `existingSecret` | Existing secret with MongoDB® credentials | `""` | +| `usePasswordFile` | Mount credentials as files instead of using environment variables | `false` | +| `shards` | Number of shards to be created | `2` | +| `common.mongodbEnableNumactl` | Enable launch MongoDB instance prefixed with "numactl --interleave=all" | `false` | +| `common.useHostnames` | Enable DNS hostnames in the replica set config | `true` | +| `common.mongodbEnableIPv6` | Switch to enable/disable IPv6 on MongoDB® | `false` | +| `common.mongodbDirectoryPerDB` | Switch to enable/disable DirectoryPerDB on MongoDB® | `false` | +| `common.mongodbSystemLogVerbosity` | MongoDB® system log verbosity level | `0` | +| `common.mongodbDisableSystemLog` | Whether to disable MongoDB® system log or not | `false` | +| `common.mongodbMaxWaitTimeout` | Maximum time (in seconds) for MongoDB® nodes to wait for another MongoDB® node to be ready | `120` | +| `common.initScriptsCM` | Configmap with init scripts to execute | `""` | +| `common.initScriptsSecret` | Secret with init scripts to execute (for sensitive data) | `""` | +| `common.extraEnvVars` | An array to add extra env vars | `[]` | +| `common.extraEnvVarsCM` | Name of a ConfigMap containing extra env vars | `""` | +| `common.extraEnvVarsSecret` | Name of a Secret containing extra env vars | `""` | 
+| `common.sidecars` | Add sidecars to the pod | `[]` | +| `common.initContainers` | Add init containers to the pod | `[]` | +| `common.podAnnotations` | Additional pod annotations | `{}` | +| `common.podLabels` | Additional pod labels | `{}` | +| `common.extraVolumes` | Array to add extra volumes | `[]` | +| `common.extraVolumeMounts` | Array to add extra mounts (normally used with extraVolumes) | `[]` | +| `common.containerPorts.mongo` | MongoDB container port | `27017` | +| `common.serviceAccount.create` | Whether to create a Service Account for all pods automatically | `false` | +| `common.serviceAccount.name` | Name of a Service Account to be used by all Pods | `""` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r308` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources` | Init container resource requests/limit | `{}` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `securityContext.runAsNonRoot` | Run containers as non-root users | `true` | +| `service.name` | Specify an explicit service name | `""` | +| `service.annotations` | Additional service annotations (evaluate as a template) | `{}` | +| `service.type` | Service type | `ClusterIP` | +| `service.externalTrafficPolicy` | External traffic policy | `Cluster` | +| `service.port` | MongoDB® service port | `27017` | +| `service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `service.nodePort` | Specify the nodePort value for the LoadBalancer and NodePort service types. 
| `""` | +| `service.externalIPs` | External IP list to use with ClusterIP service type | `[]` | +| `service.loadBalancerIP` | Static IP Address to use for LoadBalancer service type | `""` | +| `service.loadBalancerSourceRanges` | List of IP ranges allowed access to load balancer (if supported) | `[]` | +| `service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `60` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `60` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | + + +### Config Server parameters + +| Name | Description | Value | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------- | ------------------- | +| `configsvr.replicas` | Number of nodes in the replica set (the first node will be primary) | `1` | +| `configsvr.resources` | Configure pod resources | `{}` | +| `configsvr.hostAliases` | Deployment pod host aliases | `[]` | +| `configsvr.mongodbExtraFlags` | MongoDB® additional command line flags | `[]` | +| `configsvr.priorityClassName` | Pod priority class name | `""` | +| `configsvr.podAffinityPreset` | Config Server Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `configsvr.podAntiAffinityPreset` | Config Server Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `configsvr.nodeAffinityPreset.type` | Config Server Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `configsvr.nodeAffinityPreset.key` | Config Server Node label key to match Ignored if `affinity` is set. | `""` | +| `configsvr.nodeAffinityPreset.values` | Config Server Node label values to match. Ignored if `affinity` is set. 
| `[]` | +| `configsvr.affinity` | Config Server Affinity for pod assignment | `{}` | +| `configsvr.nodeSelector` | Config Server Node labels for pod assignment | `{}` | +| `configsvr.tolerations` | Config Server Tolerations for pod assignment | `[]` | +| `configsvr.podManagementPolicy` | Statefulset's pod management policy, allows parallel startup of pods | `OrderedReady` | +| `configsvr.updateStrategy.type` | updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets | `RollingUpdate` | +| `configsvr.config` | MongoDB® configuration file | `""` | +| `configsvr.configCM` | ConfigMap name with Config Server configuration file (cannot be used with configsvr.config) | `""` | +| `configsvr.extraEnvVars` | An array to add extra env vars | `[]` | +| `configsvr.extraEnvVarsCM` | Name of a ConfigMap containing extra env vars | `""` | +| `configsvr.extraEnvVarsSecret` | Name of a Secret containing extra env vars | `""` | +| `configsvr.sidecars` | Add sidecars to the pod | `[]` | +| `configsvr.initContainers` | Add init containers to the pod | `[]` | +| `configsvr.podAnnotations` | Additional pod annotations | `{}` | +| `configsvr.podLabels` | Additional pod labels | `{}` | +| `configsvr.extraVolumes` | Array to add extra volumes. Requires setting `extraVolumeMounts` | `[]` | +| `configsvr.extraVolumeMounts` | Array to add extra mounts (normally used with extraVolumes). Normally used with `extraVolumes` | `[]` | +| `configsvr.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `configsvr.pdb.enabled` | Enable pod disruption budget | `false` | +| `configsvr.pdb.minAvailable` | Minimum number of available config pods allowed (`0` to disable) | `0` | +| `configsvr.pdb.maxUnavailable` | Maximum number of unavailable config pods allowed (`0` to disable) | `1` | +| `configsvr.persistence.enabled` | Use a PVC to persist data | `true` | +| `configsvr.persistence.mountPath` | Path to mount the volume at | `/bitnami/mongodb` | +| `configsvr.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `configsvr.persistence.storageClass` | Storage class of backing PVC | `""` | +| `configsvr.persistence.accessModes` | Use volume as ReadOnly or ReadWrite | `["ReadWriteOnce"]` | +| `configsvr.persistence.size` | PersistentVolumeClaim size | `8Gi` | +| `configsvr.persistence.annotations` | Persistent Volume annotations | `{}` | +| `configsvr.serviceAccount.create` | Specifies whether a ServiceAccount should be created for Config Server | `false` | +| `configsvr.serviceAccount.name` | Name of a Service Account to be used by Config Server | `""` | +| `configsvr.external.host` | Primary node of an external Config Server replicaset | `""` | +| `configsvr.external.rootPassword` | Root password of the external Config Server replicaset | `""` | +| `configsvr.external.replicasetName` | Replicaset name of an external Config Server | `""` | +| `configsvr.external.replicasetKey` | Replicaset key of an external Config Server | `""` | + + +### Mongos parameters + +| Name | Description | Value | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------ | --------------- | +| `mongos.replicas` | Number of replicas | `1` | +| `mongos.resources` | Configure pod resources | `{}` | +| `mongos.hostAliases` | Deployment pod host aliases | `[]` | +| `mongos.mongodbExtraFlags` | MongoDB® additional command line flags | `[]` | +| `mongos.priorityClassName` | Pod priority class name | `""` | +| 
`mongos.podAffinityPreset` | Mongos Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `mongos.podAntiAffinityPreset` | Mongos Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `mongos.nodeAffinityPreset.type` | Mongos Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `mongos.nodeAffinityPreset.key` | Mongos Node label key to match Ignored if `affinity` is set. | `""` | +| `mongos.nodeAffinityPreset.values` | Mongos Node label values to match. Ignored if `affinity` is set. | `[]` | +| `mongos.affinity` | Mongos Affinity for pod assignment | `{}` | +| `mongos.nodeSelector` | Mongos Node labels for pod assignment | `{}` | +| `mongos.tolerations` | Mongos Tolerations for pod assignment | `[]` | +| `mongos.podManagementPolicy` | Statefulsets pod management policy, allows parallel startup of pods | `OrderedReady` | +| `mongos.updateStrategy.type` | updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets | `RollingUpdate` | +| `mongos.config` | MongoDB® configuration file | `""` | +| `mongos.configCM` | ConfigMap name with MongoDB® configuration file (cannot be used with mongos.config) | `""` | +| `mongos.extraEnvVars` | An array to add extra env vars | `[]` | +| `mongos.extraEnvVarsCM` | Name of a ConfigMap containing extra env vars | `""` | +| `mongos.extraEnvVarsSecret` | Name of a Secret containing extra env vars | `""` | +| `mongos.sidecars` | Add sidecars to the pod | `[]` | +| `mongos.initContainers` | Add init containers to the pod | `[]` | +| `mongos.podAnnotations` | Additional pod annotations | `{}` | +| `mongos.podLabels` | Additional pod labels | `{}` | +| `mongos.extraVolumes` | Array to add extra volumes. Requires setting `extraVolumeMounts` | `[]` | +| `mongos.extraVolumeMounts` | Array to add extra volume mounts. Normally used with `extraVolumes`. | `[]` | +| `mongos.schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `mongos.useStatefulSet` | Use StatefulSet instead of Deployment | `false` | +| `mongos.servicePerReplica.enabled` | Create one service per mongos replica (must be used with statefulset) | `false` | +| `mongos.servicePerReplica.annotations` | Additional service annotations (evaluate as a template) | `{}` | +| `mongos.servicePerReplica.type` | Service type | `ClusterIP` | +| `mongos.servicePerReplica.externalTrafficPolicy` | External traffic policy | `Cluster` | +| `mongos.servicePerReplica.port` | MongoDB® service port | `27017` | +| `mongos.servicePerReplica.clusterIP` | Static clusterIP or None for headless services | `""` | +| `mongos.servicePerReplica.nodePort` | Specify the nodePort value for the LoadBalancer and NodePort service types | `""` | +| `mongos.servicePerReplica.externalIPs` | External IP list to use with ClusterIP service type | `[]` | +| `mongos.servicePerReplica.loadBalancerIP` | Static IP Address to use for LoadBalancer service type | `""` | +| `mongos.servicePerReplica.loadBalancerSourceRanges` | List of IP ranges allowed access to load balancer (if supported) | `[]` | +| `mongos.servicePerReplica.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `mongos.servicePerReplica.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `mongos.pdb.enabled` | Enable pod disruption budget | `false` | +| `mongos.pdb.minAvailable` | Minimum number of available mongo pods allowed (`0` to disable) | `0` | +| `mongos.pdb.maxUnavailable` | Maximum number of unavailable mongo pods allowed (`0` to disable) | `1` | +| `mongos.serviceAccount.create` | Whether to create a Service Account for mongos automatically | `false` | +| `mongos.serviceAccount.name` | Name of a Service Account to be used by mongos | `""` | + + +### Shard configuration: Data node parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------------------------------------- | --------------- | +| `shardsvr.dataNode.replicas` | Number of nodes in each shard replica set (the first node will be primary) | `1` | +| `shardsvr.dataNode.resources` | Configure pod resources | `{}` | +| `shardsvr.dataNode.mongodbExtraFlags` | MongoDB® additional command line flags | `[]` | +| `shardsvr.dataNode.priorityClassName` | Pod priority class name | `""` | +| `shardsvr.dataNode.podAffinityPreset` | Data nodes Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `shardsvr.dataNode.podAntiAffinityPreset` | Data nodes Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `shardsvr.dataNode.nodeAffinityPreset.type` | Data nodes Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `shardsvr.dataNode.nodeAffinityPreset.key` | Data nodes Node label key to match Ignored if `affinity` is set. | `""` | +| `shardsvr.dataNode.nodeAffinityPreset.values` | Data nodes Node label values to match. Ignored if `affinity` is set. 
| `[]` | +| `shardsvr.dataNode.affinity` | Data nodes Affinity for pod assignment | `{}` | +| `shardsvr.dataNode.nodeSelector` | Data nodes Node labels for pod assignment | `{}` | +| `shardsvr.dataNode.tolerations` | Data nodes Tolerations for pod assignment | `[]` | +| `shardsvr.dataNode.podManagementPolicy` | podManagementPolicy for the statefulset, allows parallel startup of pods | `OrderedReady` | +| `shardsvr.dataNode.updateStrategy.type` | updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets | `RollingUpdate` | +| `shardsvr.dataNode.hostAliases` | Deployment pod host aliases | `[]` | +| `shardsvr.dataNode.config` | Entries for the MongoDB® config file | `""` | +| `shardsvr.dataNode.configCM` | ConfigMap name with MongoDB® configuration (cannot be used with shardsvr.dataNode.config) | `""` | +| `shardsvr.dataNode.extraEnvVars` | An array to add extra env vars | `[]` | +| `shardsvr.dataNode.extraEnvVarsCM` | Name of a ConfigMap containing extra env vars | `""` | +| `shardsvr.dataNode.extraEnvVarsSecret` | Name of a Secret containing extra env vars | `""` | +| `shardsvr.dataNode.sidecars` | Attach additional containers (evaluated as a template) | `[]` | +| `shardsvr.dataNode.initContainers` | Add init containers to the pod | `[]` | +| `shardsvr.dataNode.podAnnotations` | Additional pod annotations | `{}` | +| `shardsvr.dataNode.podLabels` | Additional pod labels | `{}` | +| `shardsvr.dataNode.extraVolumes` | Array to add extra volumes. Requires setting `extraVolumeMounts` | `[]` | +| `shardsvr.dataNode.extraVolumeMounts` | Array to add extra mounts. Normally used with `extraVolumes` | `[]` | +| `shardsvr.dataNode.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `shardsvr.dataNode.pdb.enabled` | Enable pod disruption budget | `false` | +| `shardsvr.dataNode.pdb.minAvailable` | Minimum number of available data pods allowed (`0` to disable) | `0` | +| `shardsvr.dataNode.pdb.maxUnavailable` | Maximum number of unavailable data pods allowed (`0` to disable) | `1` | +| `shardsvr.dataNode.serviceAccount.create` | Specifies whether a ServiceAccount should be created for shardsvr | `false` | +| `shardsvr.dataNode.serviceAccount.name` | Name of a Service Account to be used by shardsvr data pods | `""` | + + +### Shard configuration: Persistence parameters + +| Name | Description | Value | +| ----------------------------------- | ---------------------------------------------------------------------------------------- | ------------------- | +| `shardsvr.persistence.enabled` | Use a PVC to persist data | `true` | +| `shardsvr.persistence.mountPath` | The path the volume will be mounted at, useful when using different MongoDB® images. 
| `/bitnami/mongodb` | +| `shardsvr.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `shardsvr.persistence.storageClass` | Storage class of backing PVC | `""` | +| `shardsvr.persistence.accessModes` | Use volume as ReadOnly or ReadWrite | `["ReadWriteOnce"]` | +| `shardsvr.persistence.size` | PersistentVolumeClaim size | `8Gi` | +| `shardsvr.persistence.annotations` | Additional volume annotations | `{}` | + + +### Shard configuration: Arbiter parameters + +| Name | Description | Value | +| -------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------- | +| `shardsvr.arbiter.replicas` | Number of arbiters in each shard replica set (the first node will be primary) | `0` | +| `shardsvr.arbiter.hostAliases` | Deployment pod host aliases | `[]` | +| `shardsvr.arbiter.resources` | Configure pod resources | `{}` | +| `shardsvr.arbiter.mongodbExtraFlags` | MongoDB® additional command line flags | `[]` | +| `shardsvr.arbiter.priorityClassName` | Pod priority class name | `""` | +| `shardsvr.arbiter.podAffinityPreset` | Arbiter's Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `shardsvr.arbiter.podAntiAffinityPreset` | Arbiter's Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `shardsvr.arbiter.nodeAffinityPreset.type` | Arbiter's Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `shardsvr.arbiter.nodeAffinityPreset.key` | Arbiter's Node label key to match Ignored if `affinity` is set. | `""` | +| `shardsvr.arbiter.nodeAffinityPreset.values` | Arbiter's Node label values to match. Ignored if `affinity` is set. | `[]` | +| `shardsvr.arbiter.affinity` | Arbiter's Affinity for pod assignment | `{}` | +| `shardsvr.arbiter.nodeSelector` | Arbiter's Node labels for pod assignment | `{}` | +| `shardsvr.arbiter.tolerations` | Arbiter's Tolerations for pod assignment | `[]` | +| `shardsvr.arbiter.podManagementPolicy` | Statefulset's pod management policy, allows parallel startup of pods | `OrderedReady` | +| `shardsvr.arbiter.updateStrategy.type` | updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets | `RollingUpdate` | +| `shardsvr.arbiter.config` | MongoDB® configuration file | `""` | +| `shardsvr.arbiter.configCM` | ConfigMap name with MongoDB® configuration file (cannot be used with shardsvr.arbiter.config) | `""` | +| `shardsvr.arbiter.extraEnvVars` | An array to add extra env vars | `[]` | +| `shardsvr.arbiter.extraEnvVarsCM` | Name of a ConfigMap containing extra env vars | `""` | +| `shardsvr.arbiter.extraEnvVarsSecret` | Name of a Secret containing extra env vars | `""` | +| `shardsvr.arbiter.sidecars` | Add sidecars to the pod | `[]` | +| `shardsvr.arbiter.initContainers` | Add init containers to the pod | `[]` | +| `shardsvr.arbiter.podAnnotations` | Additional pod annotations | `{}` | +| `shardsvr.arbiter.podLabels` | Additional pod labels | `{}` | +| `shardsvr.arbiter.extraVolumes` | Array to add extra volumes | `[]` | +| `shardsvr.arbiter.extraVolumeMounts` | Array to add extra mounts (normally used with extraVolumes) | `[]` | +| `shardsvr.arbiter.schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `shardsvr.arbiter.serviceAccount.create` | Specifies whether a ServiceAccount should be created for shardsvr arbiter nodes | `false` | +| `shardsvr.arbiter.serviceAccount.name` | Name of a Service Account to be used by shardsvr arbiter pods | `""` | + + +### Metrics parameters + +| Name | Description | Value | +| -------------------------------------------- | ---------------------------------------------------------------------------------- | -------------------------- | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | MongoDB® exporter image registry | `docker.io` | +| `metrics.image.repository` | MongoDB® exporter image name | `bitnami/mongodb-exporter` | +| `metrics.image.tag` | MongoDB® exporter image tag | `0.30.0-debian-10-r53` | +| `metrics.image.pullPolicy` | MongoDB® exporter image pull policy | `Always` | +| `metrics.image.pullSecrets` | MongoDB® exporter image pull secrets | `[]` | +| `metrics.useTLS` | Whether to connect to MongoDB® with TLS | `false` | +| `metrics.extraArgs` | String with extra arguments to the metrics exporter | `""` | +| `metrics.resources` | Metrics exporter resource requests and limits | `{}` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe | `false` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `15` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe | `false` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.containerPort` | Port of the Prometheus metrics container | `9216` | +| `metrics.podAnnotations` | Metrics exporter pod Annotation | `{}` | +| `metrics.podMonitor.enabled` | Create PodMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.podMonitor.namespace` | Namespace where podmonitor resource should be created | `monitoring` | +| `metrics.podMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.podMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.podMonitor.additionalLabels` | Additional labels that can be used so PodMonitors will be discovered by Prometheus | `{}` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set shards=4,configsvr.replicas=3,shardsvr.dataNode.replicas=2 \ + bitnami/mongodb-sharded +``` + +The above command sets the number of shards to 4, the number of replicas for the config servers to 3 and number of replicas for data nodes to 2. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example,
+
+```bash
+$ helm install my-release -f values.yaml bitnami/mongodb-sharded
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Change MongoDB® version
+
+To modify the MongoDB® version used in this chart, specify a [valid image tag](https://hub.docker.com/r/bitnami/mongodb-sharded/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach also applies to other images, such as the exporters.
+
+### Sharding
+
+This chart deploys a sharded cluster by default. Some characteristics of this chart are:
+
+- It allows HA by enabling replication on the shards and the config servers. The mongos instances can be scaled horizontally as well.
+- The number of secondary and arbiter nodes can be scaled out independently.
+
+### Initialize a fresh instance
+
+The [Bitnami MongoDB®](https://github.com/bitnami/bitnami-docker-mongodb-sharded) image allows you to use your custom scripts to initialize a fresh instance. You can create a custom ConfigMap and provide it via `initScriptsCM` (check the options for more details).
+
+The allowed extensions are `.sh` and `.js`.
+
+### Sidecars and Init Containers
+
+If you need additional containers to run within the same pod as the MongoDB® Sharded nodes (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter (available in the `mongos`, `shardsvr.dataNode`, `shardsvr.arbiter`, `configsvr` and `common` sections). Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+- name: your-image-name
+  image: your-image
+  imagePullPolicy: Always
+  ports:
+  - name: portname
+    containerPort: 1234
+```
+
+Similarly, you can add extra init containers using the `initContainers` parameter.
+
+```yaml
+initContainers:
+- name: your-image-name
+  image: your-image
+  imagePullPolicy: Always
+  ports:
+  - name: portname
+    containerPort: 1234
+```
+
+### Adding extra environment variables
+
+If you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property (available in the `mongos`, `shardsvr.dataNode`, `shardsvr.arbiter`, `configsvr` and `common` sections).
+
+```yaml
+extraEnvVars:
+  - name: MONGODB_VERSION
+    value: "4.0"
+```
+
+Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values.
+
+### Using an external config server
+
+It is possible to skip deploying shards or a config server altogether and simply deploy `mongos` instances that point to an external MongoDB® sharded database. If that is the case, set `configsvr.external.host` and `configsvr.external.replicasetName` so that the mongos instances can connect. For authentication, set the `configsvr.external.rootPassword` and `configsvr.external.replicasetKey` values, as shown in the sketch below.
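+
+A minimal sketch of the relevant values for this scenario (the host name, replica set name, password and key below are placeholders, not defaults shipped with the chart):
+
+```yaml
+configsvr:
+  external:
+    # Hypothetical primary node of the external config server replica set
+    host: configsvr-0.external-mongodb.example.com
+    replicasetName: externalcfgrs
+    # Credentials used by mongos to authenticate against the external replica set
+    rootPassword: myRootPassword
+    replicasetKey: myReplicaSetKey
+```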
+
+## Persistence
+
+The [Bitnami MongoDB®](https://github.com/bitnami/bitnami-docker-mongodb-sharded) image stores the MongoDB® data and configurations at the `/bitnami/mongodb` path of the container.
+
+The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+### Adding extra volumes
+
+This chart supports mounting extra volumes (either PVCs, secrets or configmaps) by using the `extraVolumes` and `extraVolumeMounts` properties (available in the `mongos`, `shardsvr.dataNode`, `shardsvr.arbiter`, `configsvr` and `common` sections). This can be combined with advanced operations like adding extra init containers and sidecars.
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+If authentication is enabled, it is necessary to set the `mongodbRootPassword` and `replicaSetKey` when upgrading so that the readiness/liveness probes work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use. Please note down the password and run the command below to upgrade your chart:
+
+```bash
+$ helm upgrade my-release bitnami/mongodb-sharded --set mongodbRootPassword=[PASSWORD] (--set replicaSetKey=[REPLICASETKEY])
+```
+
+> Note: you need to substitute the placeholders [PASSWORD] and [REPLICASETKEY] with the values obtained in the installation notes.
+
+### To 4.0.0
+
+In this version, the mongodb-exporter bundled as part of this Helm chart was updated to a new version which, even though it is not a major change, can contain breaking changes (from `0.11.X` to `0.30.X`).
+Please visit the release notes from the upstream project at https://github.com/percona/mongodb_exporter/releases
+
+### To 3.1.0
+
+This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency.
More documentation about this utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade. + +### To 3.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart used `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- The different fields present in the *Chart.yaml* file have been reordered alphabetically in a homogeneous way for all the Bitnami Helm Charts. + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues. +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore. +- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3. + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +### To 2.0.0 + +MongoDB® container images were updated to `4.4.x`, which can affect compatibility with older versions of MongoDB®. Refer to the following guide to upgrade your applications: + +- [Upgrade a Sharded Cluster to 4.4](https://docs.mongodb.com/manual/release-notes/4.4-upgrade-sharded-cluster/) + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/.helmignore b/ansible/roles/helm_install/files/mongo-manifest/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/Chart.yaml b/ansible/roles/helm_install/files/mongo-manifest/charts/common/Chart.yaml new file mode 100644 index 0000000..3f32f99 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.11.3 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 1.11.3 diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/README.md b/ansible/roles/helm_install/files/mongo-manifest/charts/common/README.md new file mode 100644 index 0000000..8dc47f0 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/README.md @@ -0,0 +1,345 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library, which are scoped in different sections.
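+
+Each helper is consumed with `include`, passing the "Expected Input" shown in the tables. As a quick, hypothetical sketch (the `mongos` component name is just an example), a chart template could request a soft pod anti-affinity like this:
+
+```yaml
+affinity:
+  podAntiAffinity:
+    {{- include "common.affinities.pods.soft" (dict "component" "mongos" "context" $) | nindent 4 }}
+```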
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|----------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using the client default if `.Values.kubeVersion` is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for poddisruptionbudget. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure.
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-------------------------|------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|-----------------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created.
| `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, the length, strong and chartName fields are optional. | +| `common.secrets.exists` | Returns whether a previously generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`, where value is the value that should be rendered as a template and context is frequently the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|---------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from the `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns the first `.Values` key with a defined value, or the first key of the list if none are defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate that a value is not empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate instructions on how to get the value. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate that multiple values are not empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure the required passwords for MariaDB are not empty. It returns a shared error for all the values.
| `dict "secret" "mariadb-secret" "subchart" "true" "context" $` The subchart field is optional and can be true or false, depending on whether the MariaDB chart is used as a subchart. | +| `common.validations.values.postgresql.passwords` | This helper will ensure the required passwords for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` The subchart field is optional and can be true or false, depending on whether the PostgreSQL chart is used as a subchart. | +| `common.validations.values.redis.passwords` | This helper will ensure the required passwords for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` The subchart field is optional and can be true or false, depending on whether the Redis™ chart is used as a subchart. | +| `common.validations.values.cassandra.passwords` | This helper will ensure the required passwords for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` The subchart field is optional and can be true or false, depending on whether the Cassandra chart is used as a subchart. | +| `common.validations.values.mongodb.passwords` | This helper will ensure the required passwords for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` The subchart field is optional and can be true or false, depending on whether the MongoDB® chart is used as a subchart. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|------------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using a rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify an imagePullPolicy. Defaults to 'Always' if the image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information in the logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether to enable persistence. + example: true + +storageClass: + type: string + description: Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size of the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted.
+ example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their own existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty, we will see some alerts: + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart used `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been reordered alphabetically in a homogeneous way for all the Bitnami Helm Charts. + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues. +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore. +- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3. + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_affinities.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..189ea40 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" .
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_capabilities.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..b94212b --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_capabilities.tpl @@ -0,0 +1,128 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .)
-}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. 
+**To be removed when the catalog's minimum Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_errors.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_errors.tpl new file mode 100644 index 0000000..a79cc2e --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Throw an error when upgrading using empty password values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims."
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_images.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_ingress.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..8caf73a --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. 
+ +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending on whether it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_labels.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" .
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_names.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_names.tpl new file mode 100644 index 0000000..cf03231 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_names.tpl @@ -0,0 +1,52 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_secrets.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..a53fb44 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_storage.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_tplvalues.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains a template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_utils.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..ea083a2 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build an env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" (dict "field" "my-password") }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from the .Values object given its key path +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "."
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns the first .Values key with a defined value, or the first key of the list if none are defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_warnings.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_cassandra.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" .
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_mariadb.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_mongodb.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a071ea4 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mongodb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mongodb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_postgresql.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_postgresql.tpl
new file mode 100644
index 0000000..164ec0d
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_postgresql.tpl
@@ -0,0 +1,129 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
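+
+For example (illustrative): with replication enabled, postgresql enabled, and no existing
+secret, both the postgresqlPassword and replication.password keys must be set or the
+render fails.
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+  {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+  {{- $enabled := include "common.postgresql.values.enabled" . -}}
+  {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+  {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+    {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+    {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 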
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.postgresql.values.key.replicationPassword" -}}
+  {{- if .subchart -}}
+    postgresql.replication.password
+  {{- else -}}
+    replication.password
+  {{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_redis.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_redis.tpl
new file mode 100644
index 0000000..5d72959
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_redis.tpl
@@ -0,0 +1,76 @@
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Redis™ required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret"
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+  {{- $enabled := include "common.redis.values.enabled" . -}}
+  {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+  {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+  {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+  {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+  {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+  {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+    {{- if eq $useAuth "true" -}}
+      {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.redis.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
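+
+For example (illustrative): with subchart=true the prefix is "redis.", so the password
+key resolves to "redis.auth.password"; otherwise the prefix is empty.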
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_validations.tpl b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 0000000..9a814cf
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
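+
+Example error output (illustrative, for valueKey "auth.password", subchart "mariadb", field "mariadb-password"):
+    'auth.password' must not be empty, please add '--set mariadb.auth.password=$MARIADB_PASSWORD' to the command.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf "  To get the current value:\n\n        %s\n" (include "common.utils.secret.getvalue" .) 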
-}}
+    {{- end -}}
+    {{- printf "\n    '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+  {{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/mongo-manifest/charts/common/values.yaml b/ansible/roles/helm_install/files/mongo-manifest/charts/common/values.yaml
new file mode 100644
index 0000000..f2df68e
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-manifest/charts/common/values.yaml
@@ -0,0 +1,5 @@
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart
diff --git a/ansible/roles/helm_install/files/mongo-manifest/override-values.yaml b/ansible/roles/helm_install/files/mongo-manifest/override-values.yaml
new file mode 100644
index 0000000..4280339
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-manifest/override-values.yaml
@@ -0,0 +1,52 @@
+mongodbRootPassword: "mongo#pass"
+configsvr:
+  #nodeSelector: {"datasaker/group": "data"}
+  tolerations:
+  - key: "dev/data-druid"
+    operator: "Exists"
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+        - matchExpressions:
+          - key: datasaker/group
+            operator: In
+            values:
+            - data-druid
+
+  external:
+    rootPassword: "mongo#pass"
+service:
+  type: NodePort
+  nodePort: "30111"
+mongos:
+  #nodeSelector: {"datasaker/group": "data"}
+  tolerations:
+  - key: "dev/data-druid"
+    operator: "Exists"
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+        - matchExpressions:
+          - key: datasaker/group
+            operator: In
+            values:
+            - data-druid
+shardsvr:
+  dataNode:
+    #nodeSelector: {"datasaker/group": "data"}
+    tolerations:
+    - key: "dev/data-druid"
+      operator: "Exists"
+    affinity:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+          - matchExpressions:
+            - key: datasaker/group
+              operator: In
+              values:
+              - data-druid
+
+
diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/NOTES.txt b/ansible/roles/helm_install/files/mongo-manifest/templates/NOTES.txt
new file mode 100644
index 0000000..6db3cef
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-manifest/templates/NOTES.txt
@@ -0,0 +1,74 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+  kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing:
+
+  kubectl exec --namespace {{ .Release.Namespace }} -ti <name-of-the-pod> -- bash
+
+In order to replicate the container startup scripts execute this command:
+
+  /opt/bitnami/scripts/mongodb-sharded/entrypoint.sh /opt/bitnami/scripts/mongodb-sharded/run.sh
+
+{{- else }}
+
+The MongoDB® Sharded cluster can be accessed via the Mongos instances on port {{ .Values.service.port }} on the following DNS name from within your cluster:
+
+    {{ include "mongodb-sharded.serviceName" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+
+To get the root password run:
+
+    export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} -o jsonpath="{.data.mongodb-root-password}" | base64 --decode)
+
+{{- if and .Values.mongodbUsername .Values.mongodbDatabase }}
+{{- if .Values.mongodbPassword }}
+
+To get the password for "{{ .Values.mongodbUsername }}" run:
+
+    export MONGODB_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} -o jsonpath="{.data.mongodb-password}" | base64 --decode)
+
+{{- end }}
+{{- end }}
+
+To connect to your database run the following command:
+
+    kubectl run --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }}-client --rm --tty -i --restart='Never' --image {{ template "mongodb-sharded.image" . }} --command -- mongo admin --host {{ include "mongodb-sharded.serviceName" . }} {{- if .Values.usePassword }} --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD{{- end }}
+
+To connect to your database from outside the cluster execute the following commands:
+
+{{- if contains "NodePort" .Values.service.type }}
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "mongodb-sharded.serviceName" . }})
+    mongo --host $NODE_IP --port $NODE_PORT --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "mongodb-sharded.serviceName" . }}'
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "mongodb-sharded.serviceName" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+    mongo --host $SERVICE_IP --port {{ .Values.service.port }} --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "mongodb-sharded.serviceName" . }} {{ .Values.service.port }}:{{ .Values.service.port }} &
+    mongo --host 127.0.0.1 --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD
+
+{{- end }}
+{{- end }}
+
+{{- include "mongodb-sharded.validateValues" . -}}
+{{- include "mongodb-sharded.checkRollingTags" . -}}
diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/_helpers.tpl b/ansible/roles/helm_install/files/mongo-manifest/templates/_helpers.tpl
new file mode 100644
index 0000000..679eaa9
--- /dev/null
+++ b/ansible/roles/helm_install/files/mongo-manifest/templates/_helpers.tpl
@@ -0,0 +1,266 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Returns the ServiceAccount name for the specified path, or falls back to `common.serviceAccount.name`
+when `common.serviceAccount.create` is true; otherwise the given name or "default" is used.
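+For example (illustrative): mongos.serviceAccount.create=true with no explicit name
+resolves to the chart fullname; when every create flag is false, "default" is used.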
+Usage: +{{ include "mongodb-sharded.serviceAccountName" (dict "value" .Values.path.to.serviceAccount "context" $) }} +*/}} +{{- define "mongodb-sharded.serviceAccountName" -}} +{{- if .value.create }} + {{- default (include "common.names.fullname" .context) .value.name | quote }} +{{- else if .context.Values.common.serviceAccount.create }} + {{- default (include "common.names.fullname" .context) .context.Values.common.serviceAccount.name | quote }} +{{- else -}} + {{- default "default" .value.name | quote }} +{{- end }} +{{- end }} + +{{/* +Renders a ServiceAccount for specified name. +Usage: +{{ include "mongodb-sharded.serviceaccount" (dict "value" .Values.path.to.serviceAccount "context" $) }} +*/}} +{{- define "mongodb-sharded.serviceaccount" -}} +{{- if .value.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "mongodb-sharded.serviceAccountName" (dict "value" .value "context" .context) }} + labels: + {{- include "common.labels.standard" .context | nindent 4 }} +--- +{{ end -}} +{{- end -}} + +{{- define "mongodb-sharded.secret" -}} + {{- if .Values.existingSecret -}} + {{- .Values.existingSecret -}} + {{- else }} + {{- include "common.names.fullname" . -}} + {{- end }} +{{- end -}} + +{{- define "mongodb-sharded.configServer.primaryHost" -}} + {{- if .Values.configsvr.external.host -}} + {{- .Values.configsvr.external.host }} + {{- else -}} + {{- printf "%s-configsvr-0.%s-headless.%s.svc.%s" (include "common.names.fullname" . ) (include "common.names.fullname" .) .Release.Namespace .Values.clusterDomain -}} + {{- end -}} +{{- end -}} + +{{- define "mongodb-sharded.configServer.rsName" -}} + {{- if .Values.configsvr.external.replicasetName -}} + {{- .Values.configsvr.external.replicasetName }} + {{- else }} + {{- printf "%s-configsvr" ( include "common.names.fullname" . ) -}} + {{- end }} +{{- end -}} + +{{- define "mongodb-sharded.mongos.configCM" -}} + {{- if .Values.mongos.configCM -}} + {{- .Values.mongos.configCM -}} + {{- else }} + {{- printf "%s-mongos" (include "common.names.fullname" .) -}} + {{- end }} +{{- end -}} + +{{- define "mongodb-sharded.shardsvr.dataNode.configCM" -}} + {{- if .Values.shardsvr.dataNode.configCM -}} + {{- .Values.shardsvr.dataNode.configCM -}} + {{- else }} + {{- printf "%s-shardsvr-data" (include "common.names.fullname" .) -}} + {{- end }} +{{- end -}} + +{{- define "mongodb-sharded.shardsvr.arbiter.configCM" -}} + {{- if .Values.shardsvr.arbiter.configCM -}} + {{- .Values.shardsvr.arbiter.configCM -}} + {{- else }} + {{- printf "%s-shardsvr-arbiter" (include "common.names.fullname" .) -}} + {{- end }} +{{- end -}} + +{{- define "mongodb-sharded.configsvr.configCM" -}} + {{- if .Values.configsvr.configCM -}} + {{- .Values.configsvr.configCM -}} + {{- else }} + {{- printf "%s-configsvr" (include "common.names.fullname" .) -}} + {{- end }} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "mongodb-sharded.initScriptsSecret" -}} + {{- printf "%s" (include "common.tplvalues.render" (dict "value" .Values.common.initScriptsSecret "context" $)) -}} +{{- end -}} + +{{/* +Get the initialization scripts configmap name. +*/}} +{{- define "mongodb-sharded.initScriptsCM" -}} + {{- printf "%s" (include "common.tplvalues.render" (dict "value" .Values.common.initScriptsCM "context" $)) -}} +{{- end -}} + +{{/* +Create the name for the admin secret. 
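+Resolves to auth.existingAdminSecret when set; otherwise to "<fullname>-admin"
+(illustrative: a fullname of "mongo-mongodb-sharded" yields "mongo-mongodb-sharded-admin").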
+*/}} +{{- define "mongodb-sharded.adminSecret" -}} + {{- if .Values.auth.existingAdminSecret -}} + {{- .Values.auth.existingAdminSecret -}} + {{- else -}} + {{- include "common.names.fullname" . -}}-admin + {{- end -}} +{{- end -}} + +{{/* +Create the name for the key secret. +*/}} +{{- define "mongodb-sharded.keySecret" -}} + {{- if .Values.auth.existingKeySecret -}} + {{- .Values.auth.existingKeySecret -}} + {{- else -}} + {{- include "common.names.fullname" . -}}-keyfile + {{- end -}} +{{- end -}} + +{{/* +Returns the proper Service name depending if an explicit service name is set +in the values file. If the name is not explicitly set it will take the "common.names.fullname" +*/}} +{{- define "mongodb-sharded.serviceName" -}} + {{- if .Values.service.name -}} + {{ .Values.service.name }} + {{- else -}} + {{ include "common.names.fullname" . }} + {{- end -}} +{{- end -}} + +{{/* +Return the proper MongoDB® image name +*/}} +{{- define "mongodb-sharded.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "mongodb-sharded.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "mongodb-sharded.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "mongodb-sharded.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "mongodb-sharded.validateValues" -}} + {{- $messages := list -}} + {{- $messages := append $messages (include "mongodb-sharded.validateValues.mongodbCustomDatabase" .) -}} + {{- $messages := append $messages (include "mongodb-sharded.validateValues.externalCfgServer" .) -}} + {{- $messages := append $messages (include "mongodb-sharded.validateValues.replicas" .) -}} + {{- $messages := append $messages (include "mongodb-sharded.validateValues.config" .) -}} + {{- $messages := without $messages "" -}} + {{- $message := join "\n" $messages -}} + + {{- if $message -}} + {{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} + {{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - both mongodbUsername and mongodbDatabase are necessary +to create a custom user and database during 1st initialization +*/}} +{{- define "mongodb-sharded.validateValues.mongodbCustomDatabase" -}} +{{- if or (and .Values.mongodbUsername (not .Values.mongodbDatabase)) (and (not .Values.mongodbUsername) .Values.mongodbDatabase) }} +mongodb: mongodbUsername, mongodbDatabase + Both mongodbUsername and mongodbDatabase must be provided to create + a custom user and database during 1st initialization. + Please set both of them (--set mongodbUsername="xxxx",mongodbDatabase="yyyy") +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - If using an external config server, then both the host and the replicaset name should be set. 
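+For example (illustrative values), an external config server block that passes all of the
+checks below:
+  configsvr:
+    external:
+      host: cfg-0.cfg-headless.default.svc.cluster.local
+      replicasetName: cfgrs0
+      rootPassword: <root-password>
+      replicasetKey: <replicaset-key>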
+*/}}
+{{- define "mongodb-sharded.validateValues.externalCfgServer" -}}
+{{- if and .Values.configsvr.external.replicasetName (not .Values.configsvr.external.host) -}}
+mongodb: invalidExternalConfigServer
+  You specified a replica set name for the external config server but not a host. Set both configsvr.external.replicasetName and configsvr.external.host
+{{- end -}}
+{{- if and (not .Values.configsvr.external.replicasetName) .Values.configsvr.external.host -}}
+mongodb: invalidExternalConfigServer
+  You specified a host for the external config server but not the replica set name. Set both configsvr.external.replicasetName and configsvr.external.host
+{{- end -}}
+{{- if and .Values.configsvr.external.host (not .Values.configsvr.external.rootPassword) -}}
+mongodb: invalidExternalConfigServer
+  You specified a host for the external config server but not the root password. Set the configsvr.external.rootPassword value.
+{{- end -}}
+{{- if and .Values.configsvr.external.host (not .Values.configsvr.external.replicasetKey) -}}
+mongodb: invalidExternalConfigServer
+  You specified a host for the external config server but not the replica set key. Set the configsvr.external.replicasetKey value.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of MongoDB® - The number of shards must be positive, as well as the data node replicas
+*/}}
+{{- define "mongodb-sharded.validateValues.replicas" -}}
+{{- if and (le (int .Values.shardsvr.dataNode.replicas) 0) (ge (int .Values.shards) 1) }}
+mongodb: invalidShardSvrReplicas
+  You specified an invalid number of replicas per shard. Please set shardsvr.dataNode.replicas with a positive number or set the number of shards to 0.
+{{- end -}}
+{{- if lt (int .Values.shardsvr.arbiter.replicas) 0 }}
+mongodb: invalidShardSvrArbiters
+  You specified an invalid number of arbiters per shard. Please set shardsvr.arbiter.replicas with a number greater than or equal to 0
+{{- end -}}
+{{- if and (le (int .Values.configsvr.replicas) 0) (not .Values.configsvr.external.host) }}
+mongodb: invalidConfigSvrReplicas
+  You specified an invalid number of config server replicas. Please set configsvr.replicas with a positive number or set the configsvr.external.host value to use
+  an external config server replicaset
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of MongoDB® - Cannot use both .config and .configCM
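+For example (illustrative): provide mongos.config inline in the values, or point
+mongos.configCM at an existing ConfigMap, but never both.
+*/}}
+{{- define "mongodb-sharded.validateValues.config" -}}
+{{- if and .Values.shardsvr.dataNode.configCM .Values.shardsvr.dataNode.config }}
+mongodb: shardDataNodeConflictingConfig
+  You specified both shardsvr.dataNode.configCM and shardsvr.dataNode.config. You can only set one
+{{- end -}}
+{{- if and .Values.shardsvr.arbiter.configCM .Values.shardsvr.arbiter.config }}
+mongodb: arbiterNodeConflictingConfig
+  You specified both shardsvr.arbiter.configCM and shardsvr.arbiter.config. You can only set one
+{{- end -}}
+{{- if and .Values.mongos.configCM .Values.mongos.config }}
+mongodb: mongosNodeConflictingConfig
+  You specified both mongos.configCM and mongos.config. You can only set one
+{{- end -}}
+{{- if and .Values.configsvr.configCM .Values.configsvr.config }}
+mongodb: configSvrNodeConflictingConfig
+  You specified both configsvr.configCM and configsvr.config. 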
You can only set one +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "mongodb-sharded.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-configmap.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-configmap.yaml new file mode 100644 index 0000000..0073138 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-configmap.yaml @@ -0,0 +1,11 @@ +{{- if and (not .Values.configsvr.external.host) .Values.configsvr.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-configsvr + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: configsvr +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configsvr.config "context" $) | nindent 4 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-poddisruptionbudget.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-poddisruptionbudget.yaml new file mode 100644 index 0000000..6138298 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-poddisruptionbudget.yaml @@ -0,0 +1,18 @@ +{{- if and (not .Values.configsvr.external.host) .Values.configsvr.pdb.enabled -}} +kind: PodDisruptionBudget +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }}-configsvr + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: configsvr +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: configsvr + {{- if .Values.configsvr.pdb.minAvailable }} + minAvailable: {{ .Values.configsvr.pdb.minAvailable | int }} + {{- end }} + {{- if .Values.configsvr.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.configsvr.pdb.maxUnavailable | int }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-podmonitor.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-podmonitor.yaml new file mode 100644 index 0000000..375c6e8 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-podmonitor.yaml @@ -0,0 +1,32 @@ +{{- if and (not .Values.configsvr.external.host) (and .Values.metrics.enabled .Values.metrics.podMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ include "common.names.fullname" . }}-configsvr + {{- if .Values.metrics.podMonitor.namespace }} + namespace: {{ .Values.metrics.podMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: configsvr + {{- if .Values.metrics.podMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} +spec: + podMetricsEndpoints: + - port: metrics + path: /metrics + {{- if .Values.metrics.podMonitor.interval }} + interval: {{ .Values.metrics.podMonitor.interval }} + {{- end }} + {{- if .Values.metrics.podMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.podMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: configsvr +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-statefulset.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-statefulset.yaml new file mode 100644 index 0000000..4d4458c --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/config-server/config-server-statefulset.yaml @@ -0,0 +1,376 @@ +{{- if not .Values.configsvr.external.host }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "common.names.fullname" . }}-configsvr + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: configsvr +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: configsvr + serviceName: {{ include "common.names.fullname" . }}-headless + replicas: {{ .Values.configsvr.replicas }} + podManagementPolicy: {{ .Values.configsvr.podManagementPolicy }} + updateStrategy: {{- toYaml .Values.configsvr.updateStrategy | nindent 4 }} + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: configsvr + {{- if .Values.configsvr.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.configsvr.podLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if or .Values.common.podAnnotations .Values.configsvr.podAnnotations .Values.metrics.enabled }} + annotations: + {{- if .Values.common.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.common.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.configsvr.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.configsvr.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.enabled }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- if .Values.common.schedulerName }} + schedulerName: {{ .Values.common.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ include "mongodb-sharded.serviceAccountName" (dict "value" .Values.configsvr.serviceAccount "context" $) }} + {{- if .Values.configsvr.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.configsvr.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.configsvr.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.configsvr.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.configsvr.podAffinityPreset "component" "configsvr" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.configsvr.podAntiAffinityPreset "component" "configsvr" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.configsvr.nodeAffinityPreset.type "key" .Values.configsvr.nodeAffinityPreset.key "values" .Values.configsvr.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.configsvr.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.configsvr.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.configsvr.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.configsvr.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.configsvr.priorityClassName }} + priorityClassName: {{ .Values.configsvr.priorityClassName | quote }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- include "mongodb-sharded.imagePullSecrets" . | nindent 6 }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.configsvr.persistence.enabled }} + - name: volume-permissions + image: {{ include "mongodb-sharded.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.configsvr.persistence.mountPath }}"] + securityContext: + runAsUser: 0 + resources: {{ toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.configsvr.persistence.mountPath }} + {{- end }} + {{- with .Values.configsvr.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . 
"context" $ ) | nindent 8 }} + {{- end }} + {{- with .Values.common.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + containers: + - name: mongodb + image: {{ include "mongodb-sharded.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + ports: + - containerPort: {{ .Values.common.containerPorts.mongo }} + name: mongodb + env: + - name: MONGODB_ENABLE_NUMACTL + value: {{ ternary "yes" "no" .Values.common.mongodbEnableNumactl | quote }} + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.common.mongodbSystemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + {{- if .Values.mongodbDisableSystemLog }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_MAX_TIMEOUT + value: {{ .Values.common.mongodbMaxWaitTimeout | quote }} + - name: MONGODB_SHARDING_MODE + value: "configsvr" + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_PORT_NUMBER + value: {{ .Values.common.containerPorts.mongo | quote }} + - name: MONGODB_INITIAL_PRIMARY_HOST + value: {{ include "mongodb-sharded.configServer.primaryHost" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ printf "%s-configsvr" ( include "common.names.fullname" . ) }} + {{- if .Values.common.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ include "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePasswordFile }} + - name: MONGODB_ROOT_PASSWORD_FILE + value: "/bitnami/mongodb/secrets/mongodb-root-password" + - name: MONGODB_REPLICA_SET_KEY_FILE + value: "/bitnami/mongodb/secrets/mongodb-replica-set-key" + {{- else }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" . }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" . 
}} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.common.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + {{- if .Values.common.mongodbDirectoryPerDB }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + {{- if .Values.configsvr.mongodbExtraFlags }} + - name: MONGODB_EXTRA_FLAGS + value: {{ .Values.configsvr.mongodbExtraFlags | join " " | quote }} + {{- end }} + {{- if .Values.common.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.common.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.configsvr.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.configsvr.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.common.extraEnvVarsCM .Values.common.extraEnvVarsSecret .Values.configsvr.extraEnvVarsCM .Values.configsvr.extraEnvVarsSecret }} + envFrom: + {{- if .Values.common.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.common.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.common.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.common.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if .Values.configsvr.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.configsvr.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.configsvr.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.configsvr.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - /entrypoint/replicaset-entrypoint.sh + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - pgrep + - mongod + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --disableImplicitSessions + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- end }} + volumeMounts: + - name: replicaset-entrypoint-configmap + mountPath: /entrypoint + - name: datadir + mountPath: {{ .Values.configsvr.persistence.mountPath }} + {{- if or .Values.configsvr.config .Values.configsvr.configCM }} + - name: config + mountPath: /bitnami/mongodb/conf/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + {{- if .Values.common.initScriptsCM }} + - 
name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/cm + {{- end }} + {{- if .Values.common.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if .Values.common.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.common.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.configsvr.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.configsvr.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.configsvr.resources | nindent 12 }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "mongodb-sharded.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + {{- if .Values.usePasswordFile }} + - name: MONGODB_ROOT_PASSWORD_FILE + value: "/bitnami/mongodb/secrets/mongodb-root-password" + {{- else }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" . }} + key: mongodb-root-password + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - sh + - -ec + - |- + #!/bin/sh + {{- if .Values.usePasswordFile }} + export MONGODB_ROOT_PASSWORD="$(cat "${MONGODB_ROOT_PASSWORD_FILE}")" + {{- end }} + /bin/mongodb_exporter --web.listen-address ":{{ .Values.metrics.containerPort }}" --mongodb.uri mongodb://root:`echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g"`@localhost:{{ .Values.service.port }}/admin{{ ternary "?ssl=true" "" $.Values.metrics.useTLS }} {{ .Values.metrics.extraArgs }} + {{- end }} + {{- if .Values.usePasswordFile }} + volumeMounts: + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + {{- end }} + {{- end }} + resources: {{ toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- with .Values.configsvr.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" . 
"context" $ ) | nindent 8 }} + {{- end }} + {{- with .Values.common.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + volumes: + - name: replicaset-entrypoint-configmap + configMap: + name: {{ include "common.names.fullname" . }}-replicaset-entrypoint + {{- if .Values.usePasswordFile }} + - name: secrets + secret: + secretName: {{ include "mongodb-sharded.secret" . }} + {{- end }} + {{- if or .Values.configsvr.config .Values.configsvr.configCM }} + - name: config + configMap: + name: {{ include "mongodb-sharded.configsvr.configCM" . }} + {{- end }} + {{- if .Values.common.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ include "mongodb-sharded.initScriptsCM" . }} + defaultMode: 0755 + {{- end }} + {{- if .Values.common.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ include "mongodb-sharded.initScriptsSecret" . }} + defaultMode: 0755 + {{- end }} + {{- if .Values.common.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.common.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.configsvr.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.configsvr.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.configsvr.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.configsvr.persistence.annotations }} + {{ $key }}: "{{ $value }}" + {{- end }} + spec: + accessModes: + {{- range .Values.configsvr.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.configsvr.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.configsvr.persistence "global" .Values.global) | nindent 8 }} + {{- else }} + - name: datadir + emptyDir: {} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/extra-list.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/headless-service.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/headless-service.yaml new file mode 100644 index 0000000..6f9c7d8 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/headless-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . }}-headless + labels: {{- include "common.labels.standard" . | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: mongodb + port: {{ .Values.service.port }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-configmap.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-configmap.yaml new file mode 100644 index 0000000..8fb01a5 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-configmap.yaml @@ -0,0 +1,11 @@ +{{- if .Values.mongos.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-mongos + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongos +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.mongos.config "context" $) | nindent 4 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-dep-sts.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-dep-sts.yaml new file mode 100644 index 0000000..1b89fde --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-dep-sts.yaml @@ -0,0 +1,319 @@ +apiVersion: {{ if .Values.mongos.useStatefulSet }}{{ include "common.capabilities.statefulset.apiVersion" . }}{{- else }}{{ include "common.capabilities.deployment.apiVersion" . }}{{- end }} +kind: {{ if .Values.mongos.useStatefulSet }}StatefulSet{{- else }}Deployment{{- end }} +metadata: + name: {{ include "common.names.fullname" . }}-mongos + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongos +spec: + {{- if .Values.mongos.useStatefulSet }} + serviceName: {{ include "mongodb-sharded.serviceName" . }} + podManagementPolicy: {{ .Values.mongos.podManagementPolicy }} + updateStrategy: + {{- else }} + strategy: + {{- end }} + {{- toYaml .Values.mongos.updateStrategy | nindent 4 }} + replicas: {{ .Values.mongos.replicas }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: mongos + template: + metadata: + labels: {{- include "common.labels.standard" . 
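+## The mongos ConfigMap above is rendered only when mongos.config is set; a minimal
+## values sketch (illustrative settings, not chart defaults):
+## mongos:
+##   config: |
+##     systemLog:
+##       verbosity: 1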
| nindent 8 }} + app.kubernetes.io/component: mongos + {{- if .Values.mongos.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.mongos.podLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if or .Values.common.podAnnotations .Values.mongos.podAnnotations .Values.metrics.enabled }} + annotations: + {{- if .Values.common.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.common.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.mongos.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.mongos.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.enabled }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- if .Values.common.schedulerName }} + schedulerName: {{ .Values.common.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ include "mongodb-sharded.serviceAccountName" (dict "value" $.Values.mongos.serviceAccount "context" $) }} + {{- if .Values.mongos.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.mongos.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.mongos.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.mongos.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.mongos.podAffinityPreset "component" "mongos" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.mongos.podAntiAffinityPreset "component" "mongos" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.mongos.nodeAffinityPreset.type "key" .Values.mongos.nodeAffinityPreset.key "values" .Values.mongos.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.mongos.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.mongos.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.mongos.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.mongos.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.mongos.priorityClassName }} + priorityClassName: {{ .Values.mongos.priorityClassName | quote }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- include "mongodb-sharded.imagePullSecrets" . | nindent 6 }} + {{- if or $.Values.mongos.initContainers $.Values.common.initContainers }} + initContainers: + {{- with $.Values.mongos.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + {{- with $.Values.common.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: mongos + image: {{ include "mongodb-sharded.image" . 
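+## When mongos.affinity is left empty, the preset helpers above take over; a common
+## choice is a soft anti-affinity so mongos replicas prefer spreading across nodes:
+## mongos:
+##   podAntiAffinityPreset: soft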
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: MONGODB_ENABLE_NUMACTL + value: {{ ternary "yes" "no" $.Values.common.mongodbEnableNumactl | quote }} + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MONGODB_SHARDING_MODE + value: "mongos" + - name: MONGODB_MAX_TIMEOUT + value: {{ .Values.common.mongodbMaxWaitTimeout | quote }} + {{- if .Values.usePasswordFile }} + - name: MONGODB_ROOT_PASSWORD_FILE + value: "/bitnami/mongodb/secrets/mongodb-root-password" + - name: MONGODB_REPLICA_SET_KEY_FILE + value: "/bitnami/mongodb/secrets/mongodb-replica-set-key" + {{- else }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" . }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" . }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.common.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME)" + {{- end }} + - name: MONGODB_PORT_NUMBER + value: {{ $.Values.common.containerPorts.mongo | quote }} + - name: MONGODB_CFG_PRIMARY_HOST + value: {{ include "mongodb-sharded.configServer.primaryHost" . }} + - name: MONGODB_CFG_REPLICA_SET_NAME + value: {{ include "mongodb-sharded.configServer.rsName" . }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.common.mongodbSystemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + {{- if .Values.common.mongodbDisableSystemLog }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.common.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + {{- if .Values.common.mongodbDirectoryPerDB }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + {{- if .Values.mongos.mongodbExtraFlags }} + - name: MONGODB_EXTRA_FLAGS + value: {{ .Values.mongos.mongodbExtraFlags | join " " | quote }} + {{- end }} + {{- if .Values.common.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.mongos.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.mongos.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.common.extraEnvVarsCM .Values.common.extraEnvVarsSecret .Values.mongos.extraEnvVarsCM .Values.mongos.extraEnvVarsSecret }} + envFrom: + {{- if .Values.common.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.common.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.common.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.common.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if .Values.mongos.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.mongos.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.mongos.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.mongos.extraEnvVarsSecret "context" $ ) }} +
{{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + ports: + - name: mongodb + containerPort: {{ $.Values.common.containerPorts.mongo }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --disableImplicitSessions + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --disableImplicitSessions + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + {{- if or .Values.mongos.config .Values.mongos.configCM }} + - name: config + mountPath: /bitnami/mongodb/conf/ + {{- end }} + {{- if $.Values.common.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if $.Values.mongos.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.mongos.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.mongos.resources | nindent 12 }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "mongodb-sharded.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + {{- if .Values.usePasswordFile }} + - name: MONGODB_ROOT_PASSWORD_FILE + value: "/bitnami/mongodb/secrets/mongodb-root-password" + {{- else }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" . 
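+## Both probes above exec "mongo --eval db.adminCommand('ping')" inside the mongos
+## container; on slow clusters the thresholds can be relaxed through values, e.g.:
+## livenessProbe:
+##   initialDelaySeconds: 60
+##   timeoutSeconds: 10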
}} + key: mongodb-root-password + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - sh + - -ec + - |- + #!/bin/sh + {{- if .Values.usePasswordFile }} + export MONGODB_ROOT_PASSWORD="$(cat "${MONGODB_ROOT_PASSWORD_FILE}")" + {{- end }} + /bin/mongodb_exporter --web.listen-address ":{{ .Values.metrics.containerPort }}" --mongodb.uri mongodb://root:`echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g"`@localhost:{{ .Values.service.port }}/admin{{ ternary "?ssl=true" "" $.Values.metrics.useTLS }} {{ .Values.metrics.extraArgs }} + {{- end }} + {{- if .Values.usePasswordFile }} + volumeMounts: + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + {{- end }} + {{- end }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- with $.Values.mongos.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + {{- with $.Values.common.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: secrets + secret: + secretName: {{ include "mongodb-sharded.secret" . }} + {{- end }} + {{- if or .Values.mongos.config .Values.mongos.configCM }} + - name: config + configMap: + name: {{ include "mongodb-sharded.mongos.configCM" . 
}} + {{- end }} + {{- if $.Values.common.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.mongos.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.mongos.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-poddisruptionbudget.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-poddisruptionbudget.yaml new file mode 100644 index 0000000..e7d30fe --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-poddisruptionbudget.yaml @@ -0,0 +1,18 @@ +{{- if .Values.mongos.pdb.enabled -}} +kind: PodDisruptionBudget +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }}-mongos + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongos +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: mongos + {{- if .Values.mongos.pdb.minAvailable }} + minAvailable: {{ .Values.mongos.pdb.minAvailable | int }} + {{- end }} + {{- if .Values.mongos.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.mongos.pdb.maxUnavailable | int }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-podmonitor.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-podmonitor.yaml new file mode 100644 index 0000000..8b1bc19 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-podmonitor.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.metrics.enabled .Values.metrics.podMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ include "common.names.fullname" . }}-mongos + {{- if .Values.metrics.podMonitor.namespace }} + namespace: {{ .Values.metrics.podMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongos + {{- if .Values.metrics.podMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} +spec: + podMetricsEndpoints: + - port: metrics + path: /metrics + {{- if .Values.metrics.podMonitor.interval }} + interval: {{ .Values.metrics.podMonitor.interval }} + {{- end }} + {{- if .Values.metrics.podMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.podMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
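+## The PodDisruptionBudget above is opt-in; with several mongos replicas a minimal
+## sketch is:
+## mongos:
+##   replicas: 3
+##   pdb:
+##     enabled: true
+##     minAvailable: 2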
| nindent 6 }} + app.kubernetes.io/component: mongos +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-service-per-replica.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-service-per-replica.yaml new file mode 100644 index 0000000..6364892 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-service-per-replica.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.mongos.useStatefulSet .Values.mongos.servicePerReplica.enabled }} +{{- range $i := until (.Values.mongos.replicas | int) }} +{{- $context := deepCopy $ | merge (dict "index" $i) }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mongodb-sharded.serviceName" $ }}-{{ $i }} + labels: {{ include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: mongos + annotations: {{- include "common.tplvalues.render" (dict "value" $.Values.mongos.servicePerReplica.annotations "context" $context) | nindent 4 }} +spec: + type: {{ $.Values.mongos.servicePerReplica.type }} + {{- if and $.Values.mongos.servicePerReplica.loadBalancerIP (eq $.Values.mongos.servicePerReplica.type "LoadBalancer") }} + loadBalancerIP: {{ $.Values.mongos.servicePerReplica.loadBalancerIP }} + {{- end }} + {{- if and (eq $.Values.mongos.servicePerReplica.type "LoadBalancer") $.Values.mongos.servicePerReplica.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ with $.Values.mongos.servicePerReplica.loadBalancerSourceRanges }} + {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 4 }} + {{- end }} + {{- end }} + {{- if and (eq $.Values.mongos.servicePerReplica.type "ClusterIP") $.Values.mongos.servicePerReplica.clusterIP }} + clusterIP: {{ $.Values.mongos.servicePerReplica.clusterIP }} + {{- end }} + ports: + - name: mongodb + port: {{ $.Values.mongos.servicePerReplica.port }} + targetPort: mongodb + {{- if $.Values.mongos.servicePerReplica.nodePort }} + nodePort: {{ $.Values.mongos.servicePerReplica.nodePort }} + {{- else if eq $.Values.mongos.servicePerReplica.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if $.Values.metrics.enabled }} + - name: metrics + port: 9216 + targetPort: metrics + {{- end }} + {{- if $.Values.mongos.servicePerReplica.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $.Values.mongos.servicePerReplica.extraPorts "context" $context) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" $ | nindent 4 }} + app.kubernetes.io/component: mongos + statefulset.kubernetes.io/pod-name: {{ include "common.names.fullname" $ }}-mongos-{{ $i }} + sessionAffinity: {{ default "None" $.Values.mongos.servicePerReplica.sessionAffinity }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-service.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-service.yaml new file mode 100644 index 0000000..4666378 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/mongos/mongos-service.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mongodb-sharded.serviceName" . }} + labels: {{ include "common.labels.standard" . 
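+## The per-replica Services above require the StatefulSet flavour of mongos, since
+## each selector pins a stable pod name; illustrative values:
+## mongos:
+##   useStatefulSet: true
+##   servicePerReplica:
+##     enabled: true
+##     type: LoadBalancer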
| nindent 4 }} + app.kubernetes.io/component: mongos + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ with .Values.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} + {{- end }} + {{- end }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + ports: + - name: mongodb + port: {{ .Values.service.port }} + targetPort: mongodb + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + port: 9216 + targetPort: metrics + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: mongos + sessionAffinity: {{ default "None" .Values.service.sessionAffinity }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/replicaset-entrypoint-configmap.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/replicaset-entrypoint-configmap.yaml new file mode 100644 index 0000000..7df7c0e --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/replicaset-entrypoint-configmap.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-replicaset-entrypoint + labels: {{- include "common.labels.standard" . | nindent 4 }} +data: + replicaset-entrypoint.sh: |- + #!/bin/bash + + sleep 5 + + . /liblog.sh + + # Perform adaptations depending on the host name + if [[ $HOSTNAME =~ (.*)-0$ ]]; then + info "Setting node as primary" + export MONGODB_REPLICA_SET_MODE=primary + else + info "Setting node as secondary" + export MONGODB_REPLICA_SET_MODE=secondary + {{- if .Values.usePasswordFile }} + export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD_FILE="$MONGODB_ROOT_PASSWORD_FILE" + unset MONGODB_ROOT_PASSWORD_FILE + {{- else }} + export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD="$MONGODB_ROOT_PASSWORD" + unset MONGODB_ROOT_PASSWORD + {{- end }} + fi + + exec /entrypoint.sh /run.sh diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/secrets.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/secrets.yaml new file mode 100644 index 0000000..f7839ae --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/secrets.yaml @@ -0,0 +1,30 @@ +{{- if not .Values.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . 
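+## The entrypoint script above derives the replica-set role from the pod ordinal:
+## the "-0" pod of each StatefulSet boots as primary and every other ordinal joins
+## as secondary, e.g. "mongo-shard0-data-0" -> primary, "mongo-shard0-data-1" ->
+## secondary (hypothetical release name "mongo").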
}} + labels: {{- include "common.labels.standard" $ | nindent 4 }} +type: Opaque +data: + {{- if .Values.configsvr.external.rootPassword }} + mongodb-root-password: {{ .Values.configsvr.external.rootPassword | b64enc | quote }} + {{- else if .Values.mongodbRootPassword }} + mongodb-root-password: {{ .Values.mongodbRootPassword | b64enc | quote }} + {{- else }} + mongodb-root-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + {{- if .Values.mongodbPassword }} + mongodb-password: {{ .Values.mongodbPassword | b64enc | quote }} + {{- else }} + mongodb-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if .Values.configsvr.external.replicasetKey }} + mongodb-replica-set-key: {{ .Values.configsvr.external.replicasetKey | b64enc | quote }} + {{- else if .Values.replicaSetKey }} + mongodb-replica-set-key: {{ .Values.replicaSetKey | b64enc | quote }} + {{- else }} + mongodb-replica-set-key: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/serviceaccount.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/serviceaccount.yaml new file mode 100644 index 0000000..676c09a --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +{{ include "mongodb-sharded.serviceaccount" (dict "value" .Values.common.serviceAccount "context" $) }} +{{ include "mongodb-sharded.serviceaccount" (dict "value" .Values.mongos.serviceAccount "context" $) }} +{{ include "mongodb-sharded.serviceaccount" (dict "value" .Values.configsvr.serviceAccount "context" $) }} +{{ include "mongodb-sharded.serviceaccount" (dict "value" .Values.shardsvr.arbiter.serviceAccount "context" $) }} +{{ include "mongodb-sharded.serviceaccount" (dict "value" .Values.shardsvr.dataNode.serviceAccount "context" $) }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-arbiter-configmap.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-arbiter-configmap.yaml new file mode 100644 index 0000000..ef84cb5 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-arbiter-configmap.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.shards .Values.shardsvr.arbiter.replicas .Values.shardsvr.arbiter.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-shardsvr-arbiter + labels: {{- include "common.labels.standard" . 
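+## The generated credentials above are re-randomized on every "helm upgrade"; to pin
+## them, pre-create a Secret with the same keys and set existingSecret (hypothetical
+## name "my-mongodb-secret"):
+##   kubectl create secret generic my-mongodb-secret \
+##     --from-literal=mongodb-root-password=<password> \
+##     --from-literal=mongodb-replica-set-key=<key>
+## existingSecret: my-mongodb-secret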
| nindent 4 }} + app.kubernetes.io/component: shardsvr-arbiter +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.shardsvr.arbiter.config "context" $) | nindent 4 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-arbiter-statefulset.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-arbiter-statefulset.yaml new file mode 100644 index 0000000..c48dfe0 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-arbiter-statefulset.yaml @@ -0,0 +1,337 @@ +{{- if and .Values.shards .Values.shardsvr.arbiter.replicas }} +{{- $replicas := $.Values.shards | int }} +{{- range $i, $e := until $replicas }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ printf "%s-shard%d-arbiter" (include "common.names.fullname" $ ) $i }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: shardsvr-arbiter +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" $ | nindent 6 }} + app.kubernetes.io/component: shardsvr-arbiter + podManagementPolicy: {{ $.Values.shardsvr.arbiter.podManagementPolicy }} + updateStrategy: {{- toYaml $.Values.shardsvr.arbiter.updateStrategy | nindent 4 }} + serviceName: {{ include "common.names.fullname" $ }}-headless + replicas: {{ $.Values.shardsvr.arbiter.replicas }} + template: + metadata: + labels: {{- include "common.labels.standard" $ | nindent 8 }} + app.kubernetes.io/component: shardsvr-arbiter + {{- if $.Values.shardsvr.arbiter.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.arbiter.podLabels "context" $ ) | nindent 8 }} + {{- end }} + shard: {{ $i | quote }} + {{- if or $.Values.common.podAnnotations $.Values.shardsvr.arbiter.podAnnotations $.Values.metrics.enabled }} + annotations: + {{- if $.Values.common.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.arbiter.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.metrics.enabled }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + spec: + serviceAccountName: {{ include "mongodb-sharded.serviceAccountName" (dict "value" $.Values.shardsvr.arbiter.serviceAccount "context" $) }} + {{- if $.Values.common.schedulerName }} + schedulerName: {{ $.Values.common.schedulerName | quote }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.arbiter.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.arbiter.affinity "context" (set $ "arbiterLoopId" $i)) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" $.Values.shardsvr.arbiter.podAffinityPreset "component" "shardsvr-arbiter" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" $.Values.shardsvr.arbiter.podAntiAffinityPreset "component" "shardsvr-arbiter" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" 
$.Values.shardsvr.arbiter.nodeAffinityPreset.type "key" $.Values.shardsvr.arbiter.nodeAffinityPreset.key "values" $.Values.shardsvr.arbiter.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.arbiter.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.arbiter.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.priorityClassName }} + priorityClassName: {{ $.Values.shardsvr.arbiter.priorityClassName | quote }} + {{- end }} + {{- if $.Values.securityContext.enabled }} + securityContext: + fsGroup: {{ $.Values.securityContext.fsGroup }} + {{- end }} + {{- include "mongodb-sharded.imagePullSecrets" $ | nindent 6 }} + {{- if or $.Values.shardsvr.arbiter.initContainers $.Values.common.initContainers }} + initContainers: + {{- with $.Values.shardsvr.arbiter.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + {{- with $.Values.common.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: {{ include "common.names.fullname" $ }}-arbiter + image: {{ include "mongodb-sharded.image" $ }} + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- if $.Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ $.Values.securityContext.runAsNonRoot }} + runAsUser: {{ $.Values.securityContext.runAsUser }} + {{- end }} + ports: + - containerPort: {{ $.Values.common.containerPorts.mongo }} + name: mongodb + env: + - name: MONGODB_ENABLE_NUMACTL + value: {{ ternary "yes" "no" $.Values.common.mongodbEnableNumactl | quote }} + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or $.Values.image.debug $.Values.diagnosticMode.enabled) | quote }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ $.Values.common.mongodbSystemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + {{- if $.Values.common.mongodbDisableSystemLog }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_MAX_TIMEOUT + value: {{ $.Values.common.mongodbMaxWaitTimeout | quote }} + - name: MONGODB_SHARDING_MODE + value: "shardsvr" + - name: MONGODB_REPLICA_SET_MODE + value: "arbiter" + - name: MONGODB_PORT_NUMBER + value: {{ $.Values.common.containerPorts.mongo | quote }} + - name: MONGODB_INITIAL_PRIMARY_HOST + value: {{ printf "%s-shard%d-data-0.%s-headless.%s.svc.%s" (include "common.names.fullname" $ ) $i (include "common.names.fullname" $ ) $.Release.Namespace $.Values.clusterDomain }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ printf "%s-shard-%d" ( include "common.names.fullname" $ ) $i }} + {{- if $.Values.common.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ include "common.names.fullname" $ }}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}" + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if $.Values.common.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + {{- if $.Values.common.mongodbDirectoryPerDB }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + {{- if $.Values.usePasswordFile }} + - name: 
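+## For each shard the arbiter above points at the shard's data-0 pod; with a
+## hypothetical fullname "mongo" in namespace "db", shard 0 renders as:
+##   mongo-shard0-data-0.mongo-headless.db.svc.cluster.local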
MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD_FILE + value: "/bitnami/mongodb/secrets/mongodb-root-password" + - name: MONGODB_REPLICA_SET_KEY_FILE + value: "/bitnami/mongodb/secrets/mongodb-replica-set-key" + {{- else }} + - name: MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" $ }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" $ }} + key: mongodb-replica-set-key + {{- end }} + {{- if $.Values.shardsvr.arbiter.mongodbExtraFlags }} + - name: MONGODB_EXTRA_FLAGS + value: {{ $.Values.shardsvr.arbiter.mongodbExtraFlags | join " " | quote }} + {{- end }} + {{- if $.Values.common.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.arbiter.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or $.Values.common.extraEnvVarsCM $.Values.common.extraEnvVarsSecret $.Values.shardsvr.arbiter.extraEnvVarsCM $.Values.shardsvr.arbiter.extraEnvVarsSecret }} + envFrom: + {{- if $.Values.common.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" $.Values.common.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if $.Values.common.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" $.Values.common.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.arbiter.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.arbiter.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + {{- if $.Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + {{- if not $.Values.diagnosticMode.enabled }} + {{- if $.Values.livenessProbe.enabled }} + livenessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ $.Values.livenessProbe.successThreshold }} + failureThreshold: {{ $.Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if $.Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ $.Values.readinessProbe.successThreshold }} + failureThreshold: {{ $.Values.readinessProbe.failureThreshold }} + {{- end }} + {{- end }} + volumeMounts: + {{- if or $.Values.shardsvr.arbiter.config $.Values.shardsvr.arbiter.configCM }} + - name: config + mountPath: /bitnami/mongodb/conf/ + {{- end }} + {{- if $.Values.usePasswordFile }} + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + {{- if 
$.Values.common.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/cm + {{- end }} + {{- if $.Values.common.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if $.Values.common.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.arbiter.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + resources: {{- toYaml $.Values.shardsvr.arbiter.resources | nindent 12 }} + {{- if $.Values.metrics.enabled }} + - name: metrics + image: {{ include "mongodb-sharded.metrics.image" $ }} + imagePullPolicy: {{ $.Values.metrics.image.pullPolicy | quote }} + {{- if $.Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ $.Values.securityContext.runAsNonRoot }} + runAsUser: {{ $.Values.securityContext.runAsUser }} + {{- end }} + env: + {{- if $.Values.usePasswordFile }} + - name: MONGODB_ROOT_PASSWORD_FILE + value: "/bitnami/mongodb/secrets/mongodb-root-password" + {{- else }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" $ }} + key: mongodb-root-password + {{- end }} + {{- if $.Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - sh + - -ec + - |- + #!/bin/sh + {{- if $.Values.usePasswordFile }} + export MONGODB_ROOT_PASSWORD="$(cat "${MONGODB_ROOT_PASSWORD_FILE}")" + {{- end }} + /bin/mongodb_exporter --web.listen-address ":{{ $.Values.metrics.containerPort }}" --mongodb.uri mongodb://root:`echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g"`@localhost:{{ $.Values.service.port }}/admin{{ ternary "?ssl=true" "" $.Values.metrics.useTLS }} {{ $.Values.metrics.extraArgs }} + {{- end }} + {{- if $.Values.usePasswordFile }} + volumeMounts: + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + ports: + - name: metrics + containerPort: {{ $.Values.metrics.containerPort }} + {{- if not $.Values.diagnosticMode.enabled }} + {{- if $.Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ $.Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.metrics.livenessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.metrics.livenessProbe.failureThreshold }} + successThreshold: {{ $.Values.metrics.livenessProbe.successThreshold }} + {{- end }} + {{- if $.Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ $.Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.metrics.readinessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.metrics.readinessProbe.failureThreshold }} + successThreshold: {{ $.Values.metrics.readinessProbe.successThreshold }} + {{- end }} + {{- end }} + resources: {{ toYaml $.Values.metrics.resources | nindent 12 }} + {{- end }} + {{- with $.Values.shardsvr.arbiter.sidecars 
}} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + {{- with $.Values.common.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + volumes: + {{- if or $.Values.shardsvr.arbiter.config $.Values.shardsvr.arbiter.configCM }} + - name: config + configMap: + name: {{ include "mongodb-sharded.shardsvr.arbiter.configCM" $ }} + {{- end }} + {{- if $.Values.usePasswordFile }} + - name: secrets + secret: + secretName: {{ include "mongodb-sharded.secret" $ }} + {{- end }} + {{- if $.Values.common.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ include "mongodb-sharded.initScriptsCM" $ }} + defaultMode: 0755 + {{- end }} + {{- if $.Values.common.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ include "mongodb-sharded.initScriptsSecret" $ }} + defaultMode: 0755 + {{- end }} + {{- if $.Values.common.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.arbiter.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.arbiter.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} +{{- if lt $i (sub $replicas 1) }} +--- +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-configmap.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-configmap.yaml new file mode 100644 index 0000000..95ac001 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-configmap.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.shards .Values.shardsvr.dataNode.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-shardsvr-data + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: shardsvr +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.shardsvr.dataNode.config "context" $) | nindent 4 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-poddisruptionbudget.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-poddisruptionbudget.yaml new file mode 100644 index 0000000..3cd357e --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-poddisruptionbudget.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.shards .Values.shardsvr.dataNode.pdb.enabled -}} +{{- $replicas := .Values.shards | int -}} +{{- range $i, $e := until $replicas -}} +kind: PodDisruptionBudget +apiVersion: {{ include "common.capabilities.policy.apiVersion" $ 
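+## As with mongos, the data-node ConfigMap above is only emitted when
+## shardsvr.dataNode.config is set; an illustrative values sketch:
+## shardsvr:
+##   dataNode:
+##     config: |
+##       storage:
+##         directoryPerDB: true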
}} +metadata: + name: {{ printf "%s-shard%d-data" (include "common.names.fullname" $ ) $i }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: shardsvr +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" $ | nindent 6 }} + app.kubernetes.io/component: shardsvr + shard: {{ $i | quote }} + {{- if $.Values.shardsvr.dataNode.pdb.minAvailable }} + minAvailable: {{ $.Values.shardsvr.dataNode.pdb.minAvailable | int }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.pdb.maxUnavailable }} + maxUnavailable: {{ $.Values.shardsvr.dataNode.pdb.maxUnavailable | int }} + {{- end }} +--- +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-podmonitor.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-podmonitor.yaml new file mode 100644 index 0000000..3c689fc --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-podmonitor.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.shards .Values.metrics.enabled .Values.metrics.podMonitor.enabled }} +{{- $replicas := .Values.shards | int }} +{{- range $i, $e := until $replicas }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ printf "%s-shard%d-data" (include "common.names.fullname" $ ) $i }} + {{- if $.Values.metrics.podMonitor.namespace }} + namespace: {{ $.Values.metrics.podMonitor.namespace }} + {{- else }} + namespace: {{ $.Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: shardsvr + {{- if $.Values.metrics.podMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" $.Values.metrics.podMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} +spec: + podMetricsEndpoints: + - port: metrics + path: /metrics + {{- if $.Values.metrics.podMonitor.interval }} + interval: {{ $.Values.metrics.podMonitor.interval }} + {{- end }} + {{- if $.Values.metrics.podMonitor.scrapeTimeout }} + scrapeTimeout: {{ $.Values.metrics.podMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ $.Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" $ | nindent 6 }} + app.kubernetes.io/component: shardsvr +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-statefulset.yaml b/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-statefulset.yaml new file mode 100644 index 0000000..f1f04d2 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/templates/shard/shard-data-statefulset.yaml @@ -0,0 +1,387 @@ +{{- if .Values.shards }} +{{- $replicas := .Values.shards | int }} +{{- range $i, $e := until $replicas }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ printf "%s-shard%d-data" (include "common.names.fullname" $ ) $i }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: shardsvr +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" $ | nindent 6 }} + app.kubernetes.io/component: shardsvr + podManagementPolicy: {{ $.Values.shardsvr.dataNode.podManagementPolicy }} + updateStrategy: {{- toYaml $.Values.shardsvr.dataNode.updateStrategy | nindent 4 }} + serviceName: {{ include "common.names.fullname" $ }}-headless + replicas: {{ $.Values.shardsvr.dataNode.replicas }} + template: + metadata: + labels: {{- include 
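+## One PodDisruptionBudget per shard is generated above; with three-member shard
+## replica sets a sensible sketch is:
+## shardsvr:
+##   dataNode:
+##     replicas: 3
+##     pdb:
+##       enabled: true
+##       maxUnavailable: 1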
"common.labels.standard" $ | nindent 8 }} + app.kubernetes.io/component: shardsvr + {{- if $.Values.shardsvr.dataNode.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.dataNode.podLabels "context" $ ) | nindent 8 }} + {{- end }} + shard: {{ $i | quote }} + {{- if or $.Values.common.podAnnotations $.Values.shardsvr.dataNode.podAnnotations $.Values.metrics.enabled }} + annotations: + {{- if $.Values.common.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.dataNode.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.metrics.enabled }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- if $.Values.common.schedulerName }} + schedulerName: {{ $.Values.common.schedulerName | quote }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.dataNode.affinity "context" (set $ "dataNodeLoopId" $i)) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" $.Values.shardsvr.dataNode.podAffinityPreset "component" "shardsvr" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" $.Values.shardsvr.dataNode.podAntiAffinityPreset "component" "shardsvr" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" $.Values.shardsvr.dataNode.nodeAffinityPreset.type "key" $.Values.shardsvr.dataNode.nodeAffinityPreset.key "values" $.Values.shardsvr.dataNode.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.dataNode.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.dataNode.nodeSelector "context" (set $ "dataNodeLoopId" $i)) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" $.Values.shardsvr.dataNode.tolerations "context" (set $ "dataNodeLoopId" $i)) | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "mongodb-sharded.serviceAccountName" (dict "value" $.Values.shardsvr.dataNode.serviceAccount "context" $) }} + {{- if $.Values.shardsvr.dataNode.priorityClassName }} + priorityClassName: {{ $.Values.shardsvr.dataNode.priorityClassName | quote }} + {{- end }} + {{- if $.Values.securityContext.enabled }} + securityContext: + fsGroup: {{ $.Values.securityContext.fsGroup }} + {{- end }} + {{- include "mongodb-sharded.imagePullSecrets" $ | nindent 6 }} + initContainers: + {{- if and $.Values.volumePermissions.enabled $.Values.shardsvr.persistence.enabled }} + - name: volume-permissions + image: {{ include "mongodb-sharded.volumePermissions.image" $ }} + imagePullPolicy: {{ $.Values.volumePermissions.image.pullPolicy | quote }} + command: ["chown", "-R", "{{ $.Values.securityContext.runAsUser }}:{{ $.Values.securityContext.fsGroup }}", "{{ $.Values.shardsvr.persistence.mountPath }}"] + securityContext: + runAsUser: 0 + resources: {{ toYaml 
$.Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: datadir + mountPath: {{ $.Values.shardsvr.persistence.mountPath }} + {{- end }} + {{- with $.Values.shardsvr.dataNode.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + {{- with $.Values.common.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + containers: + - name: mongodb + image: {{ include "mongodb-sharded.image" $ }} + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- if $.Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ $.Values.securityContext.runAsNonRoot }} + runAsUser: {{ $.Values.securityContext.runAsUser }} + {{- end }} + ports: + - containerPort: {{ $.Values.common.containerPorts.mongo }} + name: mongodb + env: + - name: MONGODB_ENABLE_NUMACTL + value: {{ ternary "yes" "no" $.Values.common.mongodbEnableNumactl | quote }} + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or $.Values.image.debug $.Values.diagnosticMode.enabled) | quote }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ $.Values.common.mongodbSystemLogVerbosity | quote }} + - name: MONGODB_MAX_TIMEOUT + value: {{ $.Values.common.mongodbMaxWaitTimeout | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + {{- if $.Values.common.mongodbDisableSystemLog }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_PORT_NUMBER + value: {{ $.Values.common.containerPorts.mongo | quote }} + - name: MONGODB_SHARDING_MODE + value: "shardsvr" + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_MONGOS_HOST + value: {{ include "mongodb-sharded.serviceName" $ }} + - name: MONGODB_MONGOS_PORT_NUMBER + value: {{ $.Values.service.port | quote }} + - name: MONGODB_INITIAL_PRIMARY_HOST + value: {{ printf "%s-shard%d-data-0.%s-headless.%s.svc.%s" (include "common.names.fullname" $ ) $i (include "common.names.fullname" $ ) $.Release.Namespace $.Values.clusterDomain }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ printf "%s-shard-%d" ( include "common.names.fullname" $ ) $i }} + {{- if $.Values.common.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ include "common.names.fullname" $ }}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}" + {{- end }} + {{- if $.Values.usePasswordFile }} + - name: MONGODB_ROOT_PASSWORD_FILE + value: "/bitnami/mongodb/secrets/mongodb-root-password" + - name: MONGODB_REPLICA_SET_KEY_FILE + value: "/bitnami/mongodb/secrets/mongodb-replica-set-key" + {{- else }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" $ }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" $ }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if $.Values.common.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + {{- if $.Values.common.mongodbDirectoryPerDB }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + {{- if $.Values.shardsvr.dataNode.mongodbExtraFlags }} + - name: MONGODB_EXTRA_FLAGS + value: {{ $.Values.shardsvr.dataNode.mongodbExtraFlags | join " " | quote }} + {{- end }} + {{- if $.Values.common.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraEnvVars "context" $ ) | nindent 12 
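+## mongodbExtraFlags above are joined verbatim into MONGODB_EXTRA_FLAGS; e.g. to cap
+## the WiredTiger cache on data nodes (illustrative flags):
+## shardsvr:
+##   dataNode:
+##     mongodbExtraFlags:
+##       - "--wiredTigerCacheSizeGB=2"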
}} + {{- end }} + {{- if $.Values.shardsvr.dataNode.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.dataNode.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or $.Values.common.extraEnvVarsCM $.Values.common.extraEnvVarsSecret $.Values.shardsvr.dataNode.extraEnvVarsCM $.Values.shardsvr.dataNode.extraEnvVarsSecret }} + envFrom: + {{- if $.Values.common.extraEnvVarsCM }} + - configMapRef: + name: {{ include "mongodb-sharded.tplValue" ( dict "value" $.Values.common.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if $.Values.common.extraEnvVarsSecret }} + - secretRef: + name: {{ include "mongodb-sharded.tplValue" ( dict "value" $.Values.common.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.extraEnvVarsCM }} + - configMapRef: + name: {{ include "mongodb-sharded.tplValue" ( dict "value" $.Values.shardsvr.dataNode.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.extraEnvVarsSecret }} + - secretRef: + name: {{ include "mongodb-sharded.tplValue" ( dict "value" $.Values.shardsvr.dataNode.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + {{- if $.Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - /entrypoint/replicaset-entrypoint.sh + {{- end }} + {{- if not $.Values.diagnosticMode.enabled }} + {{- if $.Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - pgrep + - mongod + initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ $.Values.livenessProbe.successThreshold }} + failureThreshold: {{ $.Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if $.Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --disableImplicitSessions + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ $.Values.readinessProbe.successThreshold }} + failureThreshold: {{ $.Values.readinessProbe.failureThreshold }} + {{- end }} + {{- end }} + volumeMounts: + - name: replicaset-entrypoint-configmap + mountPath: /entrypoint + - name: datadir + mountPath: {{ $.Values.shardsvr.persistence.mountPath }} + {{- if or $.Values.shardsvr.dataNode.config $.Values.shardsvr.dataNode.configCM }} + - name: config + mountPath: /bitnami/mongodb/conf/ + {{- end }} + {{- if $.Values.usePasswordFile }} + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + {{- if $.Values.common.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/cm + {{- end }} + {{- if $.Values.common.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if $.Values.common.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" 
$.Values.shardsvr.dataNode.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + resources: {{- toYaml $.Values.shardsvr.dataNode.resources | nindent 12 }} + {{- if $.Values.metrics.enabled }} + - name: metrics + image: {{ include "mongodb-sharded.metrics.image" $ }} + imagePullPolicy: {{ $.Values.metrics.image.pullPolicy | quote }} + {{- if $.Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ $.Values.securityContext.runAsNonRoot }} + runAsUser: {{ $.Values.securityContext.runAsUser }} + {{- end }} + env: + {{- if $.Values.usePasswordFile }} + - name: MONGODB_ROOT_PASSWORD_FILE + value: "/bitnami/mongodb/secrets/mongodb-root-password" + {{- else }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb-sharded.secret" $ }} + key: mongodb-root-password + {{- end }} + {{- if $.Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - sh + - -ec + - |- + #!/bin/sh + {{- if $.Values.usePasswordFile }} + export MONGODB_ROOT_PASSWORD="$(cat "${MONGODB_ROOT_PASSWORD_FILE}")" + {{- end }} + /bin/mongodb_exporter --web.listen-address ":{{ $.Values.metrics.containerPort }}" --mongodb.uri mongodb://root:`echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g"`@localhost:{{ $.Values.service.port }}/admin{{ ternary "?ssl=true" "" $.Values.metrics.useTLS }} {{ $.Values.metrics.extraArgs }} + {{- end }} + {{- if $.Values.usePasswordFile }} + volumeMounts: + - name: secrets + mountPath: /bitnami/mongodb/secrets/ + {{- end }} + ports: + - name: metrics + containerPort: {{ $.Values.metrics.containerPort }} + {{- if not $.Values.diagnosticMode.enabled }} + {{- if $.Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ $.Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.metrics.livenessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.metrics.livenessProbe.failureThreshold }} + successThreshold: {{ $.Values.metrics.livenessProbe.successThreshold }} + {{- end }} + {{- if $.Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ $.Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ $.Values.metrics.readinessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.metrics.readinessProbe.failureThreshold }} + successThreshold: {{ $.Values.metrics.readinessProbe.successThreshold }} + {{- end }} + {{- end }} + resources: {{ toYaml $.Values.metrics.resources | nindent 12 }} + {{- end }} + {{- with $.Values.shardsvr.dataNode.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" . "context" $ ) | nindent 8 }} + {{- end }} + {{- with $.Values.common.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" . 
"context" $ ) | nindent 8 }} + {{- end }} + volumes: + - name: replicaset-entrypoint-configmap + configMap: + name: {{ include "common.names.fullname" $ }}-replicaset-entrypoint + {{- if $.Values.usePasswordFile }} + - name: secrets + secret: + secretName: {{ include "mongodb-sharded.secret" $ }} + {{- end }} + {{- if $.Values.common.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ include "mongodb-sharded.initScriptsCM" $ }} + defaultMode: 0755 + {{- end }} + {{- if $.Values.common.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ include "mongodb-sharded.initScriptsSecret" $ }} + defaultMode: 0755 + {{- end }} + {{- if or $.Values.shardsvr.dataNode.config $.Values.shardsvr.dataNode.configCM }} + - name: config + configMap: + name: {{ include "mongodb-sharded.shardsvr.dataNode.configCM" $ }} + {{- end }} + {{- if $.Values.common.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.common.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.dataNode.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.shardsvr.dataNode.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.shardsvr.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := $.Values.shardsvr.persistence.annotations }} + {{ $key }}: "{{ $value }}" + {{- end }} + spec: + accessModes: + {{- range $.Values.shardsvr.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ $.Values.shardsvr.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" $.Values.shardsvr.persistence "global" $.Values.global) | nindent 8 }} + {{- else }} + - name: datadir + emptyDir: {} + {{- end }} +{{- if lt $i (sub $replicas 1) }} +--- +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/mongo-manifest/values.yaml b/ansible/roles/helm_install/files/mongo-manifest/values.yaml new file mode 100644 index 0000000..caf1b40 --- /dev/null +++ b/ansible/roles/helm_install/files/mongo-manifest/values.yaml @@ -0,0 +1,1217 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global storage class for dynamic provisioning +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters +## + +## @param nameOverride String to partially override mongodb.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override mongodb.fullname template +## +fullnameOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## ref: https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#introduction +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section MongoDB(®) Sharded parameters +## + +## Bitnami MongoDB(®) Sharded image version +## ref: https://hub.docker.com/r/bitnami/mongodb-sharded/tags/ +## @param image.registry MongoDB(®) Sharded image registry +## @param image.repository MongoDB(®) Sharded image name +## @param image.tag MongoDB(®) Sharded image tag (immutable tags are recommended) +## @param image.pullPolicy MongoDB(®) Sharded image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug logs should be enabled +## +image: + registry: docker.io + repository: bitnami/mongodb-sharded + tag: 4.4.13-debian-10-r5 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## MongoDB® credentials +## @param mongodbRootPassword MongoDB® root password +## If set to null it will be randomly generated +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## e.g: +## mongodbRootPassword: password +## +mongodbRootPassword: "" +## @param replicaSetKey Replica Set key (shared for shards and config servers) +## e.g: +## replicaSetKey: testkey123 +## +replicaSetKey: "" +## @param existingSecret Existing secret with MongoDB® credentials +## e.g: +## existingSecret: name-of-existing-secret +## +existingSecret: "" +## @param usePasswordFile Mount credentials as files instead of using environment variables +## +usePasswordFile: false +## @param shards Number of shards to be created +## ref: https://docs.mongodb.com/manual/core/sharded-cluster-shards/ +## +shards: 2 +## Properties for all of the pods in the cluster (shards, config servers and mongos) +## +common: + ## @param common.mongodbEnableNumactl Launch the MongoDB instance prefixed with "numactl --interleave=all" + ## ref: https://docs.mongodb.com/manual/administration/production-notes/#mongodb-and-numa-hardware + ## + mongodbEnableNumactl: false + ## @param common.useHostnames Enable DNS hostnames in the replica set config + ## + useHostnames: true + ## @param common.mongodbEnableIPv6 Switch to enable/disable IPv6 on MongoDB® + ## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 + ## + mongodbEnableIPv6: false + ## @param common.mongodbDirectoryPerDB Switch to enable/disable DirectoryPerDB on MongoDB® + ## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-directoryperdb + ## + mongodbDirectoryPerDB: false + ## @param common.mongodbSystemLogVerbosity MongoDB® system log verbosity level + ## ref: https://docs.mongodb.com/manual/reference/configuration-options/#systemlog-options + ## + mongodbSystemLogVerbosity: 0 + ## @param common.mongodbDisableSystemLog Whether to disable MongoDB® system log or not + ## ref: https://github.com/bitnami/bitnami-docker-mongodb#configuring-system-log-verbosity-level + ## + mongodbDisableSystemLog: false + ## @param common.mongodbMaxWaitTimeout Maximum time (in seconds) for MongoDB® nodes to wait for another MongoDB® node to be ready + ## + mongodbMaxWaitTimeout: 120 + ## @param common.initScriptsCM Configmap with init scripts to execute + ## + initScriptsCM: "" + ## @param common.initScriptsSecret Secret with init scripts to execute (for sensitive data) + ## + initScriptsSecret: "" + ## @param common.extraEnvVars An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: KIBANA_ELASTICSEARCH_URL + ## value: test + ## + extraEnvVars: [] + ## @param common.extraEnvVarsCM Name of a ConfigMap containing extra env vars + ## + extraEnvVarsCM: "" + ## @param common.extraEnvVarsSecret Name of a Secret containing extra env vars + ## + extraEnvVarsSecret: "" + ## @param common.sidecars Add sidecars to the pod + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param common.initContainers Add init containers to the pod + ## For example: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## + initContainers: []
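+ ## For illustration only (image and variable names below are hypothetical, not
+ ## chart defaults), the three settings above can be combined, e.g. to add an
+ ## env var, a log-shipping sidecar and a DNS wait to every pod in the cluster:
+ ## extraEnvVars:
+ ##   - name: LOG_LEVEL
+ ##     value: "info"
+ ## sidecars:
+ ##   - name: log-forwarder
+ ##     image: your-registry/your-log-forwarder:1.0
+ ##     imagePullPolicy: IfNotPresent
+ ## initContainers:
+ ##   - name: wait-for-dns
+ ##     image: busybox
+ ##     command: ['sh', '-c', 'until nslookup kubernetes.default; do sleep 2; done']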
+ ## @param common.podAnnotations Additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param common.podLabels Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param common.extraVolumes Array to add extra volumes + ## + extraVolumes: [] + ## @param common.extraVolumeMounts Array to add extra mounts (normally used with extraVolumes) + ## + extraVolumeMounts: [] + ## @param common.containerPorts.mongo MongoDB container port + ## + containerPorts: + mongo: 27017 + ## K8s Service Account. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param common.serviceAccount.create Whether to create a Service Account for all pods automatically + ## + create: false + ## @param common.serviceAccount.name Name of a Service Account to be used by all Pods + ## If not set and create is true, a name is generated using the XXX.fullname template + ## + name: "" + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsGroup` values do not work) + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image name + ## @param volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r358 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param volumePermissions.resources Init container resource requests/limit + ## + resources: {} +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## @param securityContext.enabled Enable security context +## @param securityContext.fsGroup Group ID for the container +## @param securityContext.runAsUser User ID for the container +## @param securityContext.runAsNonRoot Run containers as non-root users +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + runAsNonRoot: true +## Kubernetes service type +## ref: https://kubernetes.io/docs/concepts/services-networking/service/ +## +service: + ## @param service.name Specify an explicit service name + ## + name: "" + ## @param service.annotations Additional service annotations (evaluate as a template) + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: {} + ## @param service.type Service type + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + ## + type: ClusterIP + ## @param service.externalTrafficPolicy External traffic policy + ## Enable client source IP preservation + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + ## + externalTrafficPolicy: Cluster + ## @param service.port MongoDB® service port + ## + port: 27017 + ## @param service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#servicespec-v1-core + ## + clusterIP: "" + ## @param service.nodePort Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param service.externalIPs External IP list to use with ClusterIP service type + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + ## @param service.loadBalancerIP Static IP Address to use for LoadBalancer service type + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges List of IP ranges allowed access to load balancer (if supported) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + loadBalancerSourceRanges: [] + ## @param service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same mongos Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None +## Configure extra options for liveness probes +## This applies to all the MongoDB® nodes in the sharded cluster +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +## Configure extra options for readiness probe +## This applies to all the MongoDB® nodes in the sharded cluster +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1
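+## As an illustrative override only (the numbers are examples, not tuned
+## recommendations), a cluster on slow storage might relax both probes, e.g.:
+## livenessProbe:
+##   initialDelaySeconds: 120
+##   failureThreshold: 10
+## readinessProbe:
+##   initialDelaySeconds: 90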
+ +## @section Config Server parameters +## + +## Config Server replica set properties +## ref: https://docs.mongodb.com/manual/core/sharded-cluster-config-servers/ +## +configsvr: + ## @param configsvr.replicas Number of nodes in the replica set (the first node will be primary) + ## + replicas: 1 + ## @param configsvr.resources Configure pod resources + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + ## @param configsvr.hostAliases Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param configsvr.mongodbExtraFlags MongoDB® additional command line flags + ## Can be used to specify command line flags, for example: + ## mongodbExtraFlags: + ## - "--wiredTigerCacheSizeGB=2" + ## + mongodbExtraFlags: [] + ## @param configsvr.priorityClassName Pod priority class name + ## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param configsvr.podAffinityPreset Config Server Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param configsvr.podAntiAffinityPreset Config Server Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param configsvr.nodeAffinityPreset.type Config Server Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param configsvr.nodeAffinityPreset.key Config Server Node label key to match. Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param configsvr.nodeAffinityPreset.values Config Server Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param configsvr.affinity Config Server Affinity for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: configsvr.podAffinityPreset, configsvr.podAntiAffinityPreset, and configsvr.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param configsvr.nodeSelector Config Server Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param configsvr.tolerations Config Server Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param configsvr.podManagementPolicy Statefulset's pod management policy, allows parallel startup of pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: OrderedReady + ## @param configsvr.updateStrategy.type updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## @param configsvr.config MongoDB® configuration file + ## ref: http://docs.mongodb.org/manual/reference/configuration-options/ + ## + config: "" + ## @param configsvr.configCM ConfigMap name with Config Server configuration file (cannot be used with configsvr.config) + ## ref: http://docs.mongodb.org/manual/reference/configuration-options/ + ## + configCM: "" + ## @param configsvr.extraEnvVars An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: KIBANA_ELASTICSEARCH_URL + ## value: test + ## + extraEnvVars: [] + ## @param configsvr.extraEnvVarsCM Name of a ConfigMap containing extra env vars + ## + extraEnvVarsCM: "" + ## @param
configsvr.extraEnvVarsSecret Name of a Secret containing extra env vars + ## + extraEnvVarsSecret: "" + ## @param configsvr.sidecars Add sidecars to the pod + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param configsvr.initContainers Add init containers to the pod + ## For example: + ## initcontainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## + initContainers: [] + ## @param configsvr.podAnnotations Additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param configsvr.podLabels Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param configsvr.extraVolumes Array to add extra volumes. Requires setting `extraVolumeMounts` + ## + extraVolumes: [] + ## @param configsvr.extraVolumeMounts Array to add extra mounts (normally used with extraVolumes). Normally used with `extraVolumes` + ## + extraVolumeMounts: [] + ## @param configsvr.schedulerName Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## Pod disruption budget + ## + pdb: + ## @param configsvr.pdb.enabled Enable pod disruption budget + ## + enabled: false + ## @param configsvr.pdb.minAvailable Minimum number of available config pods allowed (`0` to disable) + ## + minAvailable: 0 + ## @param configsvr.pdb.maxUnavailable Maximum number of unavailable config pods allowed (`0` to disable) + ## + maxUnavailable: 1 + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param configsvr.persistence.enabled Use a PVC to persist data + ## + enabled: true + ## @param configsvr.persistence.mountPath Path to mount the volume at + ## MongoDB® images. + ## + mountPath: /bitnami/mongodb + ## @param configsvr.persistence.subPath Subdirectory of the volume to mount at + ## Useful in dev environments and one PV for multiple services. + ## + subPath: "" + ## @param configsvr.persistence.storageClass Storage class of backing PVC + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param configsvr.persistence.accessModes Use volume as ReadOnly or ReadWrite + ## + accessModes: + - ReadWriteOnce + ## @param configsvr.persistence.size PersistentVolumeClaim size + ## + size: 8Gi + ## @param configsvr.persistence.annotations Persistent Volume annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: {} + ## K8s Service Account. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param configsvr.serviceAccount.create Specifies whether a ServiceAccount should be created for Config Server + ## + create: false + ## @param configsvr.serviceAccount.name Name of a Service Account to be used by Config Server + ## If not set and create is true, a name is generated using the XXX.fullname template + ## + name: "" + ## Use an external config server instead of deploying one + ## + external: + ## @param configsvr.external.host Primary node of an external Config Server replicaset + ## + host: "" + ## @param configsvr.external.rootPassword Root password of the external Config Server replicaset + ## + rootPassword: "" + ## @param configsvr.external.replicasetName Replicaset name of an external Config Server + ## + replicasetName: "" + ## @param configsvr.external.replicasetKey Replicaset key of an external Config Server + ## + replicasetKey: "" + +## @section Mongos parameters +## + +## Mongos properties +## ref: https://docs.mongodb.com/manual/reference/program/mongos/#bin.mongos +## +mongos: + ## @param mongos.replicas Number of replicas + ## + replicas: 1 + ## @param mongos.resources Configure pod resources + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + ## @param mongos.hostAliases Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param mongos.mongodbExtraFlags MongoDB® additional command line flags + ## Can be used to specify command line flags, for example: + ## mongodbExtraFlags: + ## - "--wiredTigerCacheSizeGB=2" + ## + mongodbExtraFlags: [] + ## @param mongos.priorityClassName Pod priority class name + ## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param mongos.podAffinityPreset Mongos Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param mongos.podAntiAffinityPreset Mongos Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param mongos.nodeAffinityPreset.type Mongos Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param mongos.nodeAffinityPreset.key Mongos Node label key to match. Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param mongos.nodeAffinityPreset.values Mongos Node label values to match. Ignored if `affinity` is set. + ## E.g.
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param mongos.affinity Mongos Affinity for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: mongos.podAffinityPreset, mongos.podAntiAffinityPreset, and mongos.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param mongos.nodeSelector Mongos Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param mongos.tolerations Mongos Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param mongos.podManagementPolicy Statefulsets pod management policy, allows parallel startup of pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: OrderedReady + ## @param mongos.updateStrategy.type updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## @param mongos.config MongoDB® configuration file + ## ref: http://docs.mongodb.org/manual/reference/configuration-options/ + ## + config: "" + ## @param mongos.configCM ConfigMap name with MongoDB® configuration file (cannot be used with mongos.config) + ## ref: http://docs.mongodb.org/manual/reference/configuration-options/ + ## + configCM: "" + ## @param mongos.extraEnvVars An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: KIBANA_ELASTICSEARCH_URL + ## value: test + ## + extraEnvVars: [] + ## @param mongos.extraEnvVarsCM Name of a ConfigMap containing extra env vars + ## + extraEnvVarsCM: "" + ## @param mongos.extraEnvVarsSecret Name of a Secret containing extra env vars + ## + extraEnvVarsSecret: "" + ## @param mongos.sidecars Add sidecars to the pod + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param mongos.initContainers Add init containers to the pod + ## For example: + ## initcontainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## + initContainers: [] + ## @param mongos.podAnnotations Additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param mongos.podLabels Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param mongos.extraVolumes Array to add extra volumes. Requires setting `extraVolumeMounts` + ## + extraVolumes: [] + ## @param mongos.extraVolumeMounts Array to add extra volume mounts. Normally used with `extraVolumes`. + ## + extraVolumeMounts: [] + ## @param mongos.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param mongos.useStatefulSet Use StatefulSet instead of Deployment + ## + useStatefulSet: false + ## When using a StatefulSet, you can enable one service per replica. + ## This is useful when exposing mongos through load balancers: it ensures that clients + ## connect to the same mongos instance and can therefore follow their cursors + ## + servicePerReplica: + ## @param mongos.servicePerReplica.enabled Create one service per mongos replica (must be used with statefulset) + ## + enabled: false + ## @param mongos.servicePerReplica.annotations Additional service annotations (evaluate as a template) + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: {} + ## @param mongos.servicePerReplica.type Service type + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + ## + type: ClusterIP + ## @param mongos.servicePerReplica.externalTrafficPolicy External traffic policy + ## Enable client source IP preservation + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + ## + externalTrafficPolicy: Cluster + ## @param mongos.servicePerReplica.port MongoDB® service port + ## + port: 27017 + ## @param mongos.servicePerReplica.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#servicespec-v1-core + ## + clusterIP: "" + ## @param mongos.servicePerReplica.nodePort Specify the nodePort value for the LoadBalancer and NodePort service types + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param mongos.servicePerReplica.externalIPs External IP list to use with ClusterIP service type + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + ## @param mongos.servicePerReplica.loadBalancerIP Static IP Address to use for LoadBalancer service type + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + ## + loadBalancerIP: "" + ## @param mongos.servicePerReplica.loadBalancerSourceRanges List of IP ranges allowed access to load balancer (if supported) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + loadBalancerSourceRanges: [] + ## @param mongos.servicePerReplica.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param mongos.servicePerReplica.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same mongos Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## Pod disruption budget + ## + pdb: + ## @param mongos.pdb.enabled Enable pod disruption budget + ## + enabled: false + ## @param mongos.pdb.minAvailable Minimum number of available mongo pods allowed (`0` to disable) + ## + minAvailable: 0 + ## @param mongos.pdb.maxUnavailable Maximum number of unavailable mongo pods allowed (`0` to disable) + ## + maxUnavailable: 1
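+ ## A minimal sketch of the servicePerReplica setup described above (the
+ ## LoadBalancer type is illustrative, not a default; per the parameter
+ ## description it must be combined with useStatefulSet):
+ ## useStatefulSet: true
+ ## servicePerReplica:
+ ##   enabled: true
+ ##   type: LoadBalancer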
+ ## K8s Service Account. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param mongos.serviceAccount.create Whether to create a Service Account for mongos automatically + ## + create: false + ## @param mongos.serviceAccount.name Name of a Service Account to be used by mongos + ## If not set and create is true, a name is generated using the XXX.fullname template + ## + name: "" + +## @section Shard configuration: Data node parameters +## + +## Shard replica set properties +## ref: https://docs.mongodb.com/manual/replication/index.html +## +shardsvr: + ## Properties for data nodes (primary and secondary) + ## + dataNode: + ## @param shardsvr.dataNode.replicas Number of nodes in each shard replica set (the first node will be primary) + ## + replicas: 1 + ## @param shardsvr.dataNode.resources Configure pod resources + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + ## @param shardsvr.dataNode.mongodbExtraFlags MongoDB® additional command line flags + ## Can be used to specify command line flags, for example: + ## mongodbExtraFlags: + ## - "--wiredTigerCacheSizeGB=2" + ## + mongodbExtraFlags: [] + ## @param shardsvr.dataNode.priorityClassName Pod priority class name + ## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param shardsvr.dataNode.podAffinityPreset Data nodes Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param shardsvr.dataNode.podAntiAffinityPreset Data nodes Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param shardsvr.dataNode.nodeAffinityPreset.type Data nodes Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param shardsvr.dataNode.nodeAffinityPreset.key Data nodes Node label key to match. Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param shardsvr.dataNode.nodeAffinityPreset.values Data nodes Node label values to match. Ignored if `affinity` is set. + ## E.g.
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param shardsvr.dataNode.affinity Data nodes Affinity for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## You can set dataNodeLoopId (or any other parameter) by setting the below code block under this 'affinity' section: + ## affinity: + ## matchLabels: + ## shard: "{{ .dataNodeLoopId }}" + ## + ## Note: shardsvr.dataNode.podAffinityPreset, shardsvr.dataNode.podAntiAffinityPreset, and shardsvr.dataNode.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param shardsvr.dataNode.nodeSelector Data nodes Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## You can set dataNodeLoopId (or any other parameter) by setting the below code block under this 'nodeSelector' section: + ## nodeSelector: { shardId: "{{ .dataNodeLoopId }}" } + ## + nodeSelector: {} + ## @param shardsvr.dataNode.tolerations Data nodes Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## You can set dataNodeLoopId (or any other parameter) by setting the below code block under this 'tolerations' section: + ## tolerations: + ## - key: "shardId" + ## operator: "Equal" + ## value: "{{ .dataNodeLoopId }}" + ## effect: "NoSchedule" + ## + tolerations: [] + ## @param shardsvr.dataNode.podManagementPolicy podManagementPolicy for the statefulset, allows parallel startup of pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: OrderedReady + ## @param shardsvr.dataNode.updateStrategy.type updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## @param shardsvr.dataNode.hostAliases Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param shardsvr.dataNode.config Entries for the MongoDB® config file + ## ref: http://docs.mongodb.org/manual/reference/configuration-options/ + ## + config: "" + ## @param shardsvr.dataNode.configCM ConfigMap name with MongoDB® configuration (cannot be used with shardsvr.dataNode.config) + ## ref: http://docs.mongodb.org/manual/reference/configuration-options/ + ## + configCM: "" + ## @param shardsvr.dataNode.extraEnvVars An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: KIBANA_ELASTICSEARCH_URL + ## value: test + ## + extraEnvVars: [] + ## @param shardsvr.dataNode.extraEnvVarsCM Name of a ConfigMap containing extra env vars + ## + extraEnvVarsCM: "" + ## @param shardsvr.dataNode.extraEnvVarsSecret Name of a Secret containing extra env vars + ## + extraEnvVarsSecret: "" + ## @param shardsvr.dataNode.sidecars Attach additional containers (evaluated as a template) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param shardsvr.dataNode.initContainers Add init containers to the pod + ## For example: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## + initContainers: []
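+ ## Tying the scheduling parameters above together, a hedged sketch that pins
+ ## each shard's data nodes to dedicated, tainted nodes (the shardId label and
+ ## taint key are hypothetical; the .dataNodeLoopId placeholder is rendered per
+ ## shard by the chart, as the comments above describe):
+ ## nodeSelector: { shardId: "{{ .dataNodeLoopId }}" }
+ ## tolerations:
+ ##   - key: "shardId"
+ ##     operator: "Equal"
+ ##     value: "{{ .dataNodeLoopId }}"
+ ##     effect: "NoSchedule"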
+ ## @param shardsvr.dataNode.podAnnotations Additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param shardsvr.dataNode.podLabels Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param shardsvr.dataNode.extraVolumes Array to add extra volumes. Requires setting `extraVolumeMounts` + ## + extraVolumes: [] + ## @param shardsvr.dataNode.extraVolumeMounts Array to add extra mounts. Normally used with `extraVolumes` + ## + extraVolumeMounts: [] + ## @param shardsvr.dataNode.schedulerName Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## Pod disruption budget + ## + pdb: + ## @param shardsvr.dataNode.pdb.enabled Enable pod disruption budget + ## + enabled: false + ## @param shardsvr.dataNode.pdb.minAvailable Minimum number of available data pods allowed (`0` to disable) + ## + minAvailable: 0 + ## @param shardsvr.dataNode.pdb.maxUnavailable Maximum number of unavailable data pods allowed (`0` to disable) + ## + maxUnavailable: 1 + ## K8s Service Account. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param shardsvr.dataNode.serviceAccount.create Specifies whether a ServiceAccount should be created for shardsvr + ## + create: false + ## @param shardsvr.dataNode.serviceAccount.name Name of a Service Account to be used by shardsvr data pods + ## If not set and create is true, a name is generated using the XXX.fullname template + ## + name: "" + + ## @section Shard configuration: Persistence parameters + ## + + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param shardsvr.persistence.enabled Use a PVC to persist data + ## + enabled: true + ## @param shardsvr.persistence.mountPath The path the volume will be mounted at, useful when using different MongoDB® images. + ## + mountPath: /bitnami/mongodb + ## @param shardsvr.persistence.subPath Subdirectory of the volume to mount at + ## Useful in development environments or when sharing one PV across multiple services. + ## + subPath: "" + ## @param shardsvr.persistence.storageClass Storage class of backing PVC + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner.
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param shardsvr.persistence.accessModes Use volume as ReadOnly or ReadWrite + ## + accessModes: + - ReadWriteOnce + ## @param shardsvr.persistence.size PersistentVolumeClaim size + ## + size: 8Gi + ## @param shardsvr.persistence.annotations Additional volume annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: {} + + ## @section Shard configuration: Arbiter parameters + ## + + ## Properties for arbiter nodes + ## ref: https://docs.mongodb.com/manual/tutorial/add-replica-set-arbiter/ + ## + arbiter: + ## @param shardsvr.arbiter.replicas Number of arbiters in each shard replica set + ## + replicas: 0 + ## @param shardsvr.arbiter.hostAliases Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param shardsvr.arbiter.resources Configure pod resources + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + ## @param shardsvr.arbiter.mongodbExtraFlags MongoDB® additional command line flags + ## Can be used to specify command line flags, for example: + ## mongodbExtraFlags: + ## - "--wiredTigerCacheSizeGB=2" + ## + mongodbExtraFlags: [] + ## @param shardsvr.arbiter.priorityClassName Pod priority class name + ## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param shardsvr.arbiter.podAffinityPreset Arbiter's Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param shardsvr.arbiter.podAntiAffinityPreset Arbiter's Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param shardsvr.arbiter.nodeAffinityPreset.type Arbiter's Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param shardsvr.arbiter.nodeAffinityPreset.key Arbiter's Node label key to match. Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param shardsvr.arbiter.nodeAffinityPreset.values Arbiter's Node label values to match. Ignored if `affinity` is set. + ## E.g.
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param shardsvr.arbiter.affinity Arbiter's Affinity for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## You can set arbiterLoopId (or any other parameter) by setting the below code block under this 'affinity' section: + ## affinity: + ## matchLabels: + ## shard: "{{ .arbiterLoopId }}" + ## + ## Note: shardsvr.arbiter.podAffinityPreset, shardsvr.arbiter.podAntiAffinityPreset, and shardsvr.arbiter.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param shardsvr.arbiter.nodeSelector Arbiter's Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param shardsvr.arbiter.tolerations Arbiter's Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param shardsvr.arbiter.podManagementPolicy Statefulset's pod management policy, allows parallel startup of pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: OrderedReady + ## @param shardsvr.arbiter.updateStrategy.type updateStrategy for MongoDB® Primary, Secondary and Arbiter statefulsets + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## @param shardsvr.arbiter.config MongoDB® configuration file + ## ref: http://docs.mongodb.org/manual/reference/configuration-options/ + ## + config: "" + ## @param shardsvr.arbiter.configCM ConfigMap name with MongoDB® configuration file (cannot be used with shardsvr.arbiter.config) + ## ref: http://docs.mongodb.org/manual/reference/configuration-options/ + ## + configCM: "" + ## @param shardsvr.arbiter.extraEnvVars An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: KIBANA_ELASTICSEARCH_URL + ## value: test + ## + extraEnvVars: [] + ## @param shardsvr.arbiter.extraEnvVarsCM Name of a ConfigMap containing extra env vars + ## + extraEnvVarsCM: "" + ## @param shardsvr.arbiter.extraEnvVarsSecret Name of a Secret containing extra env vars + ## + extraEnvVarsSecret: "" + ## @param shardsvr.arbiter.sidecars Add sidecars to the pod + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param shardsvr.arbiter.initContainers Add init containers to the pod + ## For example: + ## initcontainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## + initContainers: [] + ## @param shardsvr.arbiter.podAnnotations Additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param shardsvr.arbiter.podLabels Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param shardsvr.arbiter.extraVolumes Array to add extra volumes + ## + extraVolumes: [] + ## @param shardsvr.arbiter.extraVolumeMounts Array to add extra mounts (normally used with extraVolumes) + ## + extraVolumeMounts: [] + ## @param shardsvr.arbiter.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## K8s Service Account. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param shardsvr.arbiter.serviceAccount.create Specifies whether a ServiceAccount should be created for shardsvr arbiter nodes + ## + create: false + ## @param shardsvr.arbiter.serviceAccount.name Name of a Service Account to be used by shardsvr arbiter pods + ## If not set and create is true, a name is generated using the XXX.fullname template + ## + name: "" + +## @section Metrics parameters +## + +metrics: + ## @param metrics.enabled Start a side-car prometheus exporter + ## + enabled: false + ## @param metrics.image.registry MongoDB® exporter image registry + ## @param metrics.image.repository MongoDB® exporter image name + ## @param metrics.image.tag MongoDB® exporter image tag + ## @param metrics.image.pullPolicy MongoDB® exporter image pull policy + ## @param metrics.image.pullSecrets MongoDB® exporter image pull secrets + ## + image: + registry: docker.io + repository: bitnami/mongodb-exporter + tag: 0.30.0-debian-10-r91 + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.useTLS Whether to connect to MongoDB® with TLS + useTLS: false + ## @param metrics.extraArgs String with extra arguments to the metrics exporter + ## ref: https://github.com/dcu/mongodb_exporter/blob/master/mongodb_exporter.go + ## + extraArgs: "" + ## @param metrics.resources Metrics exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + ## Metrics exporter liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## @param metrics.livenessProbe.enabled Enable livenessProbe + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: false + initialDelaySeconds: 15 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + ## Metrics exporter liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## @param metrics.readinessProbe.enabled Enable readinessProbe + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + 
failureThreshold: 3 + successThreshold: 1 + ## @param metrics.containerPort Port of the Prometheus metrics container + ## + containerPort: 9216 + ## @param metrics.podAnnotations [object] Metrics exporter pod Annotation + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.containerPort }}" + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + podMonitor: + ## @param metrics.podMonitor.enabled Create PodMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.podMonitor.namespace Namespace where podmonitor resource should be created + ## + namespace: monitoring + ## @param metrics.podMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.podMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.podMonitor.additionalLabels Additional labels that can be used so PodMonitors will be discovered by Prometheus + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} diff --git a/ansible/roles/helm_install/files/postgresql/.helmignore b/ansible/roles/helm_install/files/postgresql/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/roles/helm_install/files/postgresql/Chart.lock b/ansible/roles/helm_install/files/postgresql/Chart.lock new file mode 100644 index 0000000..062bd76 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.17.1 +digest: sha256:91bdebcf473f5da3c018dd74f25fab166d4faaa6be86d492f5caa50fc63f93fb +generated: "2022-09-21T11:17:10.826892958+09:00" diff --git a/ansible/roles/helm_install/files/postgresql/Chart.yaml b/ansible/roles/helm_install/files/postgresql/Chart.yaml new file mode 100644 index 0000000..59a7938 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/Chart.yaml @@ -0,0 +1,30 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 14.2.0 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.x.x +description: PostgreSQL (Postgres) is an open source object-relational database known + for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, + views, triggers and stored procedures. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/postgresql
+icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-220x234.png
+keywords:
+- postgresql
+- postgres
+- database
+- sql
+- replication
+- cluster
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+- email: cedric@desaintmartin.fr
+  name: desaintmartin
+name: postgresql
+sources:
+- https://github.com/bitnami/bitnami-docker-postgresql
+- https://www.postgresql.org/
+version: 11.1.11
diff --git a/ansible/roles/helm_install/files/postgresql/README.md b/ansible/roles/helm_install/files/postgresql/README.md
new file mode 100644
index 0000000..727ba4b
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/README.md
@@ -0,0 +1,662 @@
+
+
+# PostgreSQL packaged by Bitnami
+
+PostgreSQL (Postgres) is an open source object-relational database known for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, views, triggers and stored procedures.
+
+[Overview of PostgreSQL](http://www.postgresql.org)
+
+Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
+
+## TL;DR
+
+```bash
+helm repo add bitnami https://charts.bitnami.com/bitnami
+helm install my-release bitnami/postgresql
+```
+
+## Introduction
+
+This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha).
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/).
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```bash
+helm install my-release bitnami/postgresql
+```
+
+The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release, except for the PVCs.
+
+To delete the PVCs associated with `my-release`:
+
+```bash
+kubectl delete pvc -l release=my-release
+```
+
+> **Note**: Deleting the PVCs will delete the PostgreSQL data as well. Please be cautious before doing it.
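+
+For instance, to double-check what is left behind before removing it, list the PVCs that still carry the release label (a minimal sketch; recent Bitnami charts label resources with `app.kubernetes.io/instance` rather than `release`, so adjust the selector to whichever label your PVCs actually carry):
+
+```bash
+# List the PVCs that belong to the release before deleting them
+kubectl get pvc -l app.kubernetes.io/instance=my-release
+# Remove them once you are sure the data is no longer needed
+kubectl delete pvc -l app.kubernetes.io/instance=my-release
+```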
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.postgresql.auth.postgresPassword` | Password for the "postgres" admin user (overrides `auth.postgresPassword`) | `""` | +| `global.postgresql.auth.username` | Name for a custom user to create (overrides `auth.username`) | `""` | +| `global.postgresql.auth.password` | Password for the custom user to create (overrides `auth.password`) | `""` | +| `global.postgresql.auth.database` | Name for a custom database to create (overrides `auth.database`) | `""` | +| `global.postgresql.auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`) | `""` | +| `global.postgresql.service.ports.postgresql` | PostgreSQL service port (overrides `service.ports.postgresql`) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release (evaluated as a template) | `[]` | +| `commonLabels` | Add labels to all the deployed resources | `{}` | +| `commonAnnotations` | Add annotations to all the deployed resources | `{}` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + + +### PostgreSQL common parameters + +| Name | Description | Value | +| ------------------------------------ | -------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `image.registry` | PostgreSQL image registry | `docker.io` | +| `image.repository` | PostgreSQL image repository | `bitnami/postgresql` | +| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `14.1.0-debian-10-r80` | +| `image.pullPolicy` | PostgreSQL image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `auth.enablePostgresUser` | Assign a password to the "postgres" admin user. 
Otherwise, remote access will be blocked for this user | `true` |
+| `auth.postgresPassword` | Password for the "postgres" admin user | `""` |
+| `auth.username` | Name for a custom user to create | `""` |
+| `auth.password` | Password for the custom user to create | `""` |
+| `auth.database` | Name for a custom database to create | `""` |
+| `auth.replicationUsername` | Name of the replication user | `repl_user` |
+| `auth.replicationPassword` | Password for the replication user | `""` |
+| `auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials | `""` |
+| `auth.usePasswordFiles` | Mount credentials as files instead of using environment variables | `false` |
+| `architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` |
+| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` |
+| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. | `0` |
+| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` |
+| `containerPorts.postgresql` | PostgreSQL container port | `5432` |
+| `audit.logHostname` | Log client hostnames | `false` |
+| `audit.logConnections` | Add client log-in operations to the log file | `false` |
+| `audit.logDisconnections` | Add client log-out operations to the log file | `false` |
+| `audit.pgAuditLog` | Add operations to log using the pgAudit extension | `""` |
+| `audit.pgAuditLogCatalog` | Log catalog using pgAudit | `off` |
+| `audit.clientMinMessages` | Message log level to share with the user | `error` |
+| `audit.logLinePrefix` | Template for log line prefix (default if not set) | `""` |
+| `audit.logTimezone` | Timezone for the log timestamps | `""` |
+| `ldap.enabled` | Enable LDAP support | `false` |
+| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn` | `""` |
+| `ldap.server` | IP address or name of the LDAP server.
| `""` | +| `ldap.port` | Port number on the LDAP server to connect to | `""` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `""` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `""` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `""` | +| `ldap.bindDN` | DN of user to bind to LDAP | `""` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `""` | +| `ldap.search_attr` | Attribute to match against the user name in the search | `""` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `""` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS | `""` | +| `ldap.tls` | Set to `1` to use TLS encryption | `""` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql/data` | +| `postgresqlSharedPreloadLibraries` | Shared preload libraries (comma-separated list) | `pgaudit` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) | `true` | +| `shmVolume.sizeLimit` | Set this to enable a size limit on the shm tmpfs | `""` | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `""` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename | `""` | +| `tls.crlFilename` | File containing a Certificate Revocation List | `""` | + + +### PostgreSQL Primary parameters + +| Name | Description | Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- | +| `primary.configuration` | PostgreSQL Primary main configuration to be injected as ConfigMap | `""` | +| `primary.pgHbaConfiguration` | PostgreSQL Primary client authentication configuration | `""` | +| `primary.existingConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary configuration | `""` | +| `primary.extendedConfiguration` | Extended PostgreSQL Primary configuration (appended to main or default configuration) | `""` | +| `primary.existingExtendedConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary extended configuration | `""` | +| `primary.initdb.args` | PostgreSQL initdb extra arguments | `""` | +| `primary.initdb.postgresqlWalDir` | Specify a custom location for the PostgreSQL transaction log | `""` | +| `primary.initdb.scripts` | Dictionary of initdb scripts | `{}` | +| `primary.initdb.scriptsConfigMap` | ConfigMap with scripts to be run at first boot | `""` | +| `primary.initdb.scriptsSecret` | Secret with scripts to be run at first boot (in case it contains sensitive information) | `""` | +| `primary.initdb.user` | Specify the PostgreSQL username to execute the initdb scripts | `""` | +| `primary.initdb.password` | Specify the PostgreSQL password to execute the initdb scripts | `""` | +| `primary.standby.enabled` | Whether to enable current cluster's primary as standby server of another cluster or not | `false` | +| `primary.standby.primaryHost` | The Host of replication primary in the other cluster | `""` | +| `primary.standby.primaryPort` | The Port of replication primary in 
the other cluster | `""` | +| `primary.extraEnvVars` | Array with extra environment variables to add to PostgreSQL Primary nodes | `[]` | +| `primary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes | `""` | +| `primary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL Primary nodes | `""` | +| `primary.command` | Override default container command (useful when using custom images) | `[]` | +| `primary.args` | Override default container args (useful when using custom images) | `[]` | +| `primary.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Primary containers | `true` | +| `primary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `primary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `primary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `primary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `primary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `primary.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Primary containers | `true` | +| `primary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `primary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `primary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `primary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `primary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `primary.startupProbe.enabled` | Enable startupProbe on PostgreSQL Primary containers | `false` | +| `primary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `primary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `primary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `primary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `primary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `primary.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `primary.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `primary.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `primary.lifecycleHooks` | for the PostgreSQL Primary container to automate configuration before or after startup | `{}` | +| `primary.resources.limits` | The resources limits for the PostgreSQL Primary containers | `{}` | +| `primary.resources.requests.memory` | The requested memory for the PostgreSQL Primary containers | `256Mi` | +| `primary.resources.requests.cpu` | The requested cpu for the PostgreSQL Primary containers | `250m` | +| `primary.podSecurityContext.enabled` | Enable security context | `true` | +| `primary.podSecurityContext.fsGroup` | Group ID for the pod | `1001` | +| `primary.containerSecurityContext.enabled` | Enable container security context | `true` | +| `primary.containerSecurityContext.runAsUser` | User ID for the container | `1001` | +| `primary.hostAliases` | PostgreSQL primary pods host aliases | `[]` | +| `primary.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod | `false` | +| `primary.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod | 
`false` |
+| `primary.labels` | Map of labels to add to the statefulset (postgresql primary) | `{}` |
+| `primary.annotations` | Annotations for PostgreSQL primary pods | `{}` |
+| `primary.podLabels` | Map of labels to add to the pods (postgresql primary) | `{}` |
+| `primary.podAnnotations` | Map of annotations to add to the pods (postgresql primary) | `{}` |
+| `primary.podAffinityPreset` | PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `primary.podAntiAffinityPreset` | PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `primary.nodeAffinityPreset.type` | PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `primary.nodeAffinityPreset.key` | PostgreSQL primary node label key to match. Ignored if `primary.affinity` is set. | `""` |
+| `primary.nodeAffinityPreset.values` | PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. | `[]` |
+| `primary.affinity` | Affinity for PostgreSQL primary pods assignment | `{}` |
+| `primary.nodeSelector` | Node labels for PostgreSQL primary pods assignment | `{}` |
+| `primary.tolerations` | Tolerations for PostgreSQL primary pods assignment | `[]` |
+| `primary.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `{}` |
+| `primary.priorityClassName` | Priority Class to use for each pod (postgresql primary) | `""` |
+| `primary.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
+| `primary.terminationGracePeriodSeconds` | Seconds PostgreSQL primary pod needs to terminate gracefully | `""` |
+| `primary.updateStrategy.type` | PostgreSQL Primary statefulset strategy type | `RollingUpdate` |
+| `primary.updateStrategy.rollingUpdate` | PostgreSQL Primary statefulset rolling update configuration parameters | `{}` |
+| `primary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) | `[]` |
+| `primary.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) | `[]` |
+| `primary.sidecars` | Add additional sidecar containers to the PostgreSQL Primary pod(s) | `[]` |
+| `primary.initContainers` | Add additional init containers to the PostgreSQL Primary pod(s) | `[]` |
+| `primary.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) | `{}` |
+| `primary.service.type` | Kubernetes Service type | `ClusterIP` |
+| `primary.service.ports.postgresql` | PostgreSQL service port | `5432` |
+| `primary.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` |
+| `primary.service.clusterIP` | Static clusterIP or None for headless services | `""` |
+| `primary.service.annotations` | Annotations for PostgreSQL primary service | `{}` |
+| `primary.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` |
+| `primary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
+| `primary.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` |
+| `primary.service.extraPorts` | Extra ports to expose in the PostgreSQL primary service | `[]` |
+| `primary.persistence.enabled` | Enable PostgreSQL Primary data persistence using PVC | `true` |
+|
`primary.persistence.existingClaim` | Name of an existing PVC to use | `""` | +| `primary.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` | +| `primary.persistence.subPath` | The subdirectory of the volume to mount to | `""` | +| `primary.persistence.storageClass` | PVC Storage Class for PostgreSQL Primary data volume | `""` | +| `primary.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` | +| `primary.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `primary.persistence.annotations` | Annotations for the PVC | `{}` | +| `primary.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` | +| `primary.persistence.dataSource` | Custom PVC data source | `{}` | + + +### PostgreSQL read only replica parameters + +| Name | Description | Value | +| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- | +| `readReplicas.replicaCount` | Number of PostgreSQL read only replicas | `1` | +| `readReplicas.extraEnvVars` | Array with extra environment variables to add to PostgreSQL read only nodes | `[]` | +| `readReplicas.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes | `""` | +| `readReplicas.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL read only nodes | `""` | +| `readReplicas.command` | Override default container command (useful when using custom images) | `[]` | +| `readReplicas.args` | Override default container args (useful when using custom images) | `[]` | +| `readReplicas.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL read only containers | `true` | +| `readReplicas.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `readReplicas.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `readReplicas.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `readReplicas.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `readReplicas.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readReplicas.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL read only containers | `true` | +| `readReplicas.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readReplicas.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readReplicas.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readReplicas.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readReplicas.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `readReplicas.startupProbe.enabled` | Enable startupProbe on PostgreSQL read only containers | `false` | +| `readReplicas.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `readReplicas.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `readReplicas.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `readReplicas.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `readReplicas.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| 
`readReplicas.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `readReplicas.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `readReplicas.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `readReplicas.lifecycleHooks` | for the PostgreSQL read only container to automate configuration before or after startup | `{}` |
+| `readReplicas.resources.limits` | The resources limits for the PostgreSQL read only containers | `{}` |
+| `readReplicas.resources.requests.memory` | The requested memory for the PostgreSQL read only containers | `256Mi` |
+| `readReplicas.resources.requests.cpu` | The requested cpu for the PostgreSQL read only containers | `250m` |
+| `readReplicas.podSecurityContext.enabled` | Enable security context | `true` |
+| `readReplicas.podSecurityContext.fsGroup` | Group ID for the pod | `1001` |
+| `readReplicas.containerSecurityContext.enabled` | Enable container security context | `true` |
+| `readReplicas.containerSecurityContext.runAsUser` | User ID for the container | `1001` |
+| `readReplicas.hostAliases` | PostgreSQL read only pods host aliases | `[]` |
+| `readReplicas.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod | `false` |
+| `readReplicas.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod | `false` |
+| `readReplicas.labels` | Map of labels to add to the statefulset (PostgreSQL read only) | `{}` |
+| `readReplicas.annotations` | Annotations for PostgreSQL read only pods | `{}` |
+| `readReplicas.podLabels` | Map of labels to add to the pods (PostgreSQL read only) | `{}` |
+| `readReplicas.podAnnotations` | Map of annotations to add to the pods (PostgreSQL read only) | `{}` |
+| `readReplicas.podAffinityPreset` | PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `readReplicas.podAntiAffinityPreset` | PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `readReplicas.nodeAffinityPreset.type` | PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `readReplicas.nodeAffinityPreset.key` | PostgreSQL read only node label key to match. Ignored if `primary.affinity` is set. | `""` |
+| `readReplicas.nodeAffinityPreset.values` | PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. | `[]` |
+| `readReplicas.affinity` | Affinity for PostgreSQL read only pods assignment | `{}` |
+| `readReplicas.nodeSelector` | Node labels for PostgreSQL read only pods assignment | `{}` |
+| `readReplicas.tolerations` | Tolerations for PostgreSQL read only pods assignment | `[]` |
+| `readReplicas.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `{}` |
+| `readReplicas.priorityClassName` | Priority Class to use for each pod (PostgreSQL read only) | `""` |
+| `readReplicas.schedulerName` | Use an alternate scheduler, e.g. "stork".
| `""` | +| `readReplicas.terminationGracePeriodSeconds` | Seconds PostgreSQL read only pod needs to terminate gracefully | `""` | +| `readReplicas.updateStrategy.type` | PostgreSQL read only statefulset strategy type | `RollingUpdate` | +| `readReplicas.updateStrategy.rollingUpdate` | PostgreSQL read only statefulset rolling update configuration parameters | `{}` | +| `readReplicas.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) | `[]` | +| `readReplicas.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.sidecars` | Add additional sidecar containers to the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.initContainers` | Add additional init containers to the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL read only pod(s) | `{}` | +| `readReplicas.service.type` | Kubernetes Service type | `ClusterIP` | +| `readReplicas.service.ports.postgresql` | PostgreSQL service port | `5432` | +| `readReplicas.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` | +| `readReplicas.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `readReplicas.service.annotations` | Annotations for PostgreSQL read only service | `{}` | +| `readReplicas.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` | +| `readReplicas.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `readReplicas.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` | +| `readReplicas.service.extraPorts` | Extra ports to expose in the PostgreSQL read only service | `[]` | +| `readReplicas.persistence.enabled` | Enable PostgreSQL read only data persistence using PVC | `true` | +| `readReplicas.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` | +| `readReplicas.persistence.subPath` | The subdirectory of the volume to mount to | `""` | +| `readReplicas.persistence.storageClass` | PVC Storage Class for PostgreSQL read only data volume | `""` | +| `readReplicas.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` | +| `readReplicas.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `readReplicas.persistence.annotations` | Annotations for the PVC | `{}` | +| `readReplicas.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` | +| `readReplicas.persistence.dataSource` | Custom PVC data source | `{}` | + + +### NetworkPolicy parameters + +| Name | Description | Value | +| ------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `networkPolicy.enabled` | Enable network policies | `false` | +| `networkPolicy.metrics.enabled` | Enable network policies for metrics (prometheus) | `false` | +| `networkPolicy.metrics.namespaceSelector` | Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace. | `{}` | +| `networkPolicy.metrics.podSelector` | Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. 
| `{}` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. | `false` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed namespace(s). | `{}` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed pod(s). | `{}` |
+| `networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL primary node. | `{}` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. | `false` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed namespace(s). | `{}` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed pod(s). | `{}` |
+| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL read-only nodes. | `{}` |
+| `networkPolicy.egressRules.denyConnectionsToExternal` | Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). | `false` |
+| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` |
+
+
+### Volume Permissions parameters
+
+| Name | Description | Value |
+| --- | --- | --- |
+| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
+| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `10-debian-10-r327` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
+| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
+| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` |
+| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` |
+| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` |
+
+
+### Other Parameters
+
+| Name | Description | Value |
+| --- | --- | --- |
+| `serviceAccount.create` | Enable creation of ServiceAccount for PostgreSQL pod | `false` |
+| `serviceAccount.name` | The name of the ServiceAccount to use.
| `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | +| `rbac.rules` | Custom RBAC rules to set | `[]` | +| `psp.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` | + + +### Metrics Parameters + +| Name | Description | Value | +| ----------------------------------------------- | ------------------------------------------------------------------------------------- | --------------------------- | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.image.registry` | PostgreSQL Prometheus Exporter image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Prometheus Exporter image repository | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.10.1-debian-10-r14` | +| `metrics.image.pullPolicy` | PostgreSQL Prometheus Exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify image pull secrets | `[]` | +| `metrics.customMetrics` | Define additional custom metrics | `{}` | +| `metrics.extraEnvVars` | Extra environment variables to add to PostgreSQL Prometheus exporter | `[]` | +| `metrics.containerSecurityContext.enabled` | Enable PostgreSQL Prometheus exporter containers' Security Context | `true` | +| `metrics.containerSecurityContext.runAsUser` | Set PostgreSQL Prometheus exporter containers' Security Context runAsUser | `1001` | +| `metrics.containerSecurityContext.runAsNonRoot` | Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Prometheus exporter containers | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Prometheus exporter containers | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.startupProbe.enabled` | Enable startupProbe on PostgreSQL Prometheus exporter containers | `false` | +| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| 
`metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `metrics.containerPorts.metrics` | PostgreSQL Prometheus exporter metrics container port | `9187` |
+| `metrics.resources.limits` | The resources limits for the PostgreSQL Prometheus exporter container | `{}` |
+| `metrics.resources.requests` | The requested resources for the PostgreSQL Prometheus exporter container | `{}` |
+| `metrics.service.ports.metrics` | PostgreSQL Prometheus Exporter service port | `9187` |
+| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` |
+| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` |
+| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` |
+| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` |
+| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` |
+| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` |
+| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` |
+| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
+| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` |
+| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
+| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` |
+| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` |
+| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` |
+| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` |
+| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` |
+| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` |
+| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install my-release \
+  --set auth.postgresPassword=secretpassword \
+  bitnami/postgresql
+```
+
+The above command sets the PostgreSQL `postgres` account password to `secretpassword`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+helm install my-release -f values.yaml bitnami/postgresql
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Customizing primary and read replica services in a replicated configuration
+
+At the top level, there is a service object which defines the services for both primary and readReplicas. For deeper customization, there are service objects for the primary and read types individually. This allows you to override the values in the top-level service object so that the primary and read services can be of different types and have different clusterIPs / nodePorts. Also, if you want the primary and read services to be of type NodePort, you will need to set their nodePorts to different values to prevent a collision. The values that are deeper in the `primary.service` or `readReplicas.service` objects take precedence over the top-level service object.
+
+### Use a different PostgreSQL version
+
+To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/configuration/change-image-version/).
+
+### postgresql.conf / pg_hba.conf files as configMap
+
+This helm chart also supports customizing the PostgreSQL configuration file. You can add additional PostgreSQL configuration parameters using the `primary.extendedConfiguration` parameter as a string. Alternatively, to replace the entire default configuration use `primary.configuration`.
+
+You can also add a custom pg_hba.conf using the `primary.pgHbaConfiguration` parameter.
+
+In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `primary.existingConfigmap` parameter. Note that this will override the two previous options.
+
+### Initialize a fresh instance
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, you can specify custom scripts using the `primary.initdb.scripts` parameter as a string.
+
+In addition, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `primary.initdb.scriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `primary.initdb.scriptsSecret` parameter.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
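+
+As a minimal sketch of inlining a script via `primary.initdb.scripts` (the script name and SQL statement are illustrative, not part of the chart):
+
+```yaml
+primary:
+  initdb:
+    scripts:
+      # Hypothetical script; runs only on the first boot of a fresh instance
+      00_create_schema.sql: |
+        CREATE SCHEMA IF NOT EXISTS app;
+```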
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`
+- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+
+For example:
+
+- First, create the secret with the certificates files:
+
+  ```console
+  kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt
+  ```
+
+- Then, use the following parameters:
+
+  ```console
+  volumePermissions.enabled=true
+  tls.enabled=true
+  tls.certificatesSecret="certificates-tls-secret"
+  tls.certFilename="cert.crt"
+  tls.certKeyFilename="cert.key"
+  ```
+
+  > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `containerSecurityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected.
+
+### Sidecars
+
+If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+# For the PostgreSQL primary
+primary:
+  sidecars:
+    - name: your-image-name
+      image: your-image
+      imagePullPolicy: Always
+      ports:
+        - name: portname
+          containerPort: 1234
+# For the PostgreSQL replicas
+readReplicas:
+  sidecars:
+    - name: your-image-name
+      image: your-image
+      imagePullPolicy: Always
+      ports:
+        - name: portname
+          containerPort: 1234
+```
+
+### Metrics
+
+The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed, and it is expected that the metrics are collected from inside the k8s cluster using something similar to the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
+
+The exporter allows the creation of custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details.
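+
+As a minimal sketch, scraping via the Prometheus Operator could be enabled as follows (the `release: kube-prometheus-stack` label is an assumption about how your Prometheus instance selects ServiceMonitors; match it to your own installation):
+
+```yaml
+metrics:
+  enabled: true
+  serviceMonitor:
+    enabled: true
+    labels:
+      release: kube-prometheus-stack  # assumed Prometheus selector label
+```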
+
+### Use of global variables
+
+In more complex scenarios, we may have the following tree of dependencies:
+
+```
+                       +--------------+
+                       |              |
+          +------------+   Chart 1    +-----------+
+          |            |              |           |
+          |            --------+------+           |
+          |                    |                  |
+          |                    |                  |
+          |                    |                  |
+          v                    v                  v
++-------+------+      +--------+------+   +--------+------+
+|              |      |               |   |               |
+|  PostgreSQL  |      |  Sub-chart 1  |   |  Sub-chart 2  |
+|              |      |               |   |               |
++--------------+      +---------------+   +---------------+
+```
+
+The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be to deploy Chart 1 with the following parameters:
+
+```
+postgresql.auth.username=testuser
+subchart1.postgresql.auth.username=testuser
+subchart2.postgresql.auth.username=testuser
+postgresql.auth.password=testpass
+subchart1.postgresql.auth.password=testpass
+subchart2.postgresql.auth.password=testpass
+postgresql.auth.database=testdb
+subchart1.postgresql.auth.database=testdb
+subchart2.postgresql.auth.database=testdb
+```
+
+If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows:
+
+```
+global.postgresql.auth.username=testuser
+global.postgresql.auth.password=testpass
+global.postgresql.auth.database=testdb
+```
+
+This way, the credentials will be available in all of the subcharts.
+
+## Persistence
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If the volume already contains data, the sync to standby nodes will fail for all commits; for details, refer to [the code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use that data, please convert it to SQL and import it after `helm install` finishes.
+
+## NetworkPolicy
+
+To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+```bash
+kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+```
+
+With NetworkPolicy enabled, traffic will be limited to just port 5432.
+
+For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
+This label will be displayed in the output of a successful install.
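+
+As an illustrative values sketch, access to the primary could also be restricted with the ingress rules documented in the parameters above (the pod label is an assumption about your client workloads):
+
+```yaml
+networkPolicy:
+  enabled: true
+  ingressRules:
+    primaryAccessOnlyFrom:
+      enabled: true
+      podSelector:
+        app.kubernetes.io/name: my-app  # assumed label of the allowed client pods
+```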
+
+## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image
+
+- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, it will be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image.
+- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift.
+- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,containerSecurityContext.enabled=false,shmVolume.chmod.enabled=false
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+Refer to the [chart documentation for more information about how to upgrade from previous releases](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/administration/upgrade/).
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/ansible/roles/helm_install/files/postgresql/charts/common-1.17.1.tgz b/ansible/roles/helm_install/files/postgresql/charts/common-1.17.1.tgz
new file mode 100644
index 0000000000000000000000000000000000000000..d2e66eb702c03db2f57932528265520573877492
GIT binary patch
literal 14611
[base85-encoded binary payload of common-1.17.1.tgz omitted]
diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/.helmignore b/ansible/roles/helm_install/files/postgresql/charts/common/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/Chart.yaml b/ansible/roles/helm_install/files/postgresql/charts/common/Chart.yaml
new file mode 100644
index 0000000..2c93878
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+  category: Infrastructure
+apiVersion: v2
+appVersion: 1.13.0
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/master/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+name: common
+sources:
+- https://github.com/bitnami/charts
+- https://www.bitnami.com/
+type: library
+version: 1.13.0
diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/README.md b/ansible/roles/helm_install/files/postgresql/charts/common/README.md
new file mode 100644
index 0000000..c090f74
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/charts/common/README.md
@@ -0,0 +1,347 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+The following tables list the helpers available in the library, scoped by section.
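+
+For example, a chart that declares `common` as a dependency can call any of these helpers with `include`. A minimal sketch (the service template below is illustrative, not part of the library):
+
+```yaml
+# templates/service.yaml in a consuming chart (fragment)
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+```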
+
+### Affinities
+
+| Helper identifier              | Description                                          | Expected Input                                 |
+|--------------------------------|------------------------------------------------------|------------------------------------------------|
+| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition                | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition                | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.pods.soft`  | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $`           |
+| `common.affinities.pods.hard`  | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $`           |
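+
+For instance, a consuming chart's deployment template can spread replicas with a soft pod anti-affinity. A sketch (the `web` component name is hypothetical):
+
+```yaml
+# templates/deployment.yaml in a consuming chart (fragment)
+spec:
+  template:
+    spec:
+      affinity:
+        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" "soft" "component" "web" "context" $) | nindent 10 }}
+```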
+
+### Capabilities
+
+| Helper identifier                               | Description                                                                                     | Expected Input    |
+|-------------------------------------------------|-------------------------------------------------------------------------------------------------|-------------------|
+| `common.capabilities.kubeVersion`               | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context |
+| `common.capabilities.cronjob.apiVersion`        | Return the appropriate apiVersion for cronjob.                                                  | `.` Chart context |
+| `common.capabilities.deployment.apiVersion`     | Return the appropriate apiVersion for deployment.                                               | `.` Chart context |
+| `common.capabilities.statefulset.apiVersion`    | Return the appropriate apiVersion for statefulset.                                              | `.` Chart context |
+| `common.capabilities.ingress.apiVersion`        | Return the appropriate apiVersion for ingress.                                                  | `.` Chart context |
+| `common.capabilities.rbac.apiVersion`           | Return the appropriate apiVersion for RBAC resources.                                           | `.` Chart context |
+| `common.capabilities.crd.apiVersion`            | Return the appropriate apiVersion for CRDs.                                                     | `.` Chart context |
+| `common.capabilities.policy.apiVersion`         | Return the appropriate apiVersion for poddisruptionbudget.                                      | `.` Chart context |
+| `common.capabilities.networkPolicy.apiVersion`  | Return the appropriate apiVersion for networkpolicy.                                            | `.` Chart context |
+| `common.capabilities.apiService.apiVersion`     | Return the appropriate apiVersion for APIService.                                               | `.` Chart context |
+| `common.capabilities.supportsHelmVersion`       | Returns true if the used Helm version is 3.3+                                                   | `.` Chart context |
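+
+These helpers are typically used to pin the `apiVersion` of a resource so that the same template renders correctly on both older and newer clusters. A minimal sketch for a hypothetical ingress template:
+
+```yaml
+# templates/ingress.yaml in a consuming chart (fragment)
+apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
+kind: Ingress
+metadata:
+  name: {{ include "common.names.fullname" . }}
+```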
| `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. 
| `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. 
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
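+
+A sketch of how a Persistence-shaped value typically feeds `common.storage.class` in a PVC template (the `.Values.persistence` path is illustrative; `global` points at `.Values.global`, which the helper inspects for a global storage class):
+
+```yaml
+# templates/pvc.yaml in a consuming chart (fragment)
+spec:
+  accessModes:
+    - {{ .Values.persistence.accessMode | quote }}
+  resources:
+    requests:
+      storage: {{ .Values.persistence.size | quote }}
+  {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 2 }}
+```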
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart used `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues.
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore.
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3.
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/_affinities.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_affinities.tpl
new file mode 100644
index 0000000..189ea40
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_affinities.tpl
@@ -0,0 +1,102 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+  - preference:
+      matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+    weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+  nodeSelectorTerms:
+    - matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+  {{- if eq .type "soft" }}
+    {{- include "common.affinities.nodes.soft" . -}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.nodes.hard" . -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Return a soft podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.soft" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+  - podAffinityTerm:
+      labelSelector:
+        matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }}
+          {{- if not (empty $component) }}
+          {{ printf "app.kubernetes.io/component: %s" $component }}
+          {{- end }}
+          {{- range $key, $value := $extraMatchLabels }}
+          {{ $key }}: {{ $value | quote }}
+          {{- end }}
+      namespaces:
+        - {{ .context.Release.Namespace | quote }}
+      topologyKey: kubernetes.io/hostname
+    weight: 1
+{{- end -}}
+
+{{/*
+Return a hard podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.hard" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+  - labelSelector:
+      matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }}
+        {{- if not (empty $component) }}
+        {{ printf "app.kubernetes.io/component: %s" $component }}
+        {{- end }}
+        {{- range $key, $value := $extraMatchLabels }}
+        {{ $key }}: {{ $value | quote }}
+        {{- end }}
+    namespaces:
+      - {{ .context.Release.Namespace | quote }}
+    topologyKey: kubernetes.io/hostname
+{{- end -}}
+
+{{/*
+Return a podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods" (dict "type" "soft" "component" "FOO" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods" -}}
+  {{- if eq .type "soft" }}
+    {{- include "common.affinities.pods.soft" . -}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.pods.hard" . -}}
+  {{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/_capabilities.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_capabilities.tpl
new file mode 100644
index 0000000..4ec8321
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_capabilities.tpl
@@ -0,0 +1,139 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the target Kubernetes version
+*/}}
+{{- define "common.capabilities.kubeVersion" -}}
+{{- if .Values.global }}
+    {{- if .Values.global.kubeVersion }}
+    {{- .Values.global.kubeVersion -}}
+    {{- else }}
+    {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+    {{- end -}}
+{{- else }}
+{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for poddisruptionbudget.
+*/}}
+{{- define "common.capabilities.policy.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "policy/v1beta1" -}}
+{{- else -}}
+{{- print "policy/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "common.capabilities.networkPolicy.apiVersion" -}}
+{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for cronjob.
+*/}}
+{{- define "common.capabilities.cronjob.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "batch/v1beta1" -}}
+{{- else -}}
+{{- print "batch/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "common.capabilities.deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for statefulset.
+*/}}
+{{- define "common.capabilities.statefulset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apps/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for ingress.
+*/}}
+{{- define "common.capabilities.ingress.apiVersion" -}}
+{{- if .Values.ingress -}}
+{{- if .Values.ingress.apiVersion -}}
+{{- .Values.ingress.apiVersion -}}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end }}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/_errors.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+    {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims."
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/_images.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/_ingress.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..8caf73a --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. 
+ +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/_labels.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/_names.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 0000000..c8574d1 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/_secrets.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..a53fb44 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. 
+ +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/_storage.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/_tplvalues.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/_utils.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..ea083a2 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/_warnings.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_cassandra.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_mariadb.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_mongodb.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a071ea4 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mongodb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mongodb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_postgresql.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_postgresql.tpl
new file mode 100644
index 0000000..164ec0d
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_postgresql.tpl
@@ -0,0 +1,129 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+  {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+  {{- $enabled := include "common.postgresql.values.enabled" . -}}
+  {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+  {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+    {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+    {{- $enabledReplication := include "common.postgresql.values.enabled.replication" .
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.postgresql.values.key.replicationPassword" -}}
+  {{- if .subchart -}}
+    postgresql.replication.password
+  {{- else -}}
+    replication.password
+  {{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_redis.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_redis.tpl
new file mode 100644
index 0000000..5d72959
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_redis.tpl
@@ -0,0 +1,76 @@
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Redis™ required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret"
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+  {{- $enabled := include "common.redis.values.enabled" . -}}
+  {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+  {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+  {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+  {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+  {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+  {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+    {{- if eq $useAuth "true" -}}
+      {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.redis.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}
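The ternaries in `common.validations.values.redis.passwords` above exist because the redis chart changed its values layout in version 14. Both layouts, abridged and for illustration only:

    # redis chart >= 14 ("standardized" layout)
    auth:
      enabled: true
      password: ""
      existingSecret: ""

    # older redis charts (legacy layout)
    usePassword: true
    password: ""
    existingSecret: ""

If an `auth` block is present in the values, `common.redis.values.standarized.version` evaluates to true and the `auth.*` keys are validated; otherwise the legacy keys are used.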
diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_validations.tpl b/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 0000000..9a814cf
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional. Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .)
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/postgresql/charts/common/values.yaml b/ansible/roles/helm_install/files/postgresql/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/ansible/roles/helm_install/files/postgresql/ci/extended-config.yaml b/ansible/roles/helm_install/files/postgresql/ci/extended-config.yaml new file mode 100644 index 0000000..224168e --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/ci/extended-config.yaml @@ -0,0 +1,4 @@ +primary: + extendedConfiguration: | + pg_stat_statements.max = 10000 + pg_stat_statements.track = all diff --git a/ansible/roles/helm_install/files/postgresql/ci/init-scripts.yaml b/ansible/roles/helm_install/files/postgresql/ci/init-scripts.yaml new file mode 100644 index 0000000..66ac9bb --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/ci/init-scripts.yaml @@ -0,0 +1,8 @@ +primary: + initdb: + args: --data-checksums + postgresqlWalDir: /bitnami/wal-dir/ + scripts: + my_init_script.sh: | + #!/bin/sh + echo "Success" diff --git a/ansible/roles/helm_install/files/postgresql/ci/metrics.yaml b/ansible/roles/helm_install/files/postgresql/ci/metrics.yaml new file mode 100644 index 0000000..df26bb2 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/ci/metrics.yaml @@ -0,0 +1,24 @@ +auth: + postgresPassword: adminpassword + username: foo + password: foopassword + database: bar +metrics: + enabled: true + serviceMonitor: + enabled: true + namespace: monitoring + prometheusRule: + enabled: true + namespace: monitoring +networkPolicy: + enabled: true + metrics: + enabled: true + namespaceSelector: + label: monitoring + ingressRules: + primaryAccessOnlyFrom: + enabled: true + podSelector: + "{{ template \"common.names.fullname\" . 
}}-client": "true" diff --git a/ansible/roles/helm_install/files/postgresql/ci/rbac.yaml b/ansible/roles/helm_install/files/postgresql/ci/rbac.yaml new file mode 100644 index 0000000..ef92a1b --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/ci/rbac.yaml @@ -0,0 +1,16 @@ +serviceAccount: + create: true + name: custom-sa + automountServiceAccountToken: true +rbac: + create: true + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list +psp: + create: true diff --git a/ansible/roles/helm_install/files/postgresql/ci/replication.yaml b/ansible/roles/helm_install/files/postgresql/ci/replication.yaml new file mode 100644 index 0000000..1ce6cf8 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/ci/replication.yaml @@ -0,0 +1,5 @@ +# Test values file for generating all of the yaml and check that +# the rendering is correct +architecture: replication +readReplicas: + replicaCount: 3 diff --git a/ansible/roles/helm_install/files/postgresql/ci/tls.yaml b/ansible/roles/helm_install/files/postgresql/ci/tls.yaml new file mode 100644 index 0000000..24131f3 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/ci/tls.yaml @@ -0,0 +1,6 @@ +architecture: replication +tls: + enabled: true + autoGenerated: true +volumePermissions: + enabled: true diff --git a/ansible/roles/helm_install/files/postgresql/override-values.yaml b/ansible/roles/helm_install/files/postgresql/override-values.yaml new file mode 100644 index 0000000..61f8521 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/override-values.yaml @@ -0,0 +1,29 @@ +auth: + postgresPassword: "root" + username: "root" + password: "root" +primary: + extendedConfiguration: |- + max_connections = '1000' + shared_buffers = '1024MB' + deadlock_timeout = '5s' + statement_timeout = '15s' + idle_in_transaction_session_timeout = '60s' + shared_preload_libraries = 'pg_stat_statements' + + service: + type: NodePort + + tolerations: + - key: "dev/data-es" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-es + diff --git a/ansible/roles/helm_install/files/postgresql/templates/NOTES.txt b/ansible/roles/helm_install/files/postgresql/templates/NOTES.txt new file mode 100644 index 0000000..710c733 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/NOTES.txt @@ -0,0 +1,89 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with:
+
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+  kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing
+
+  kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- /opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash
+
+In order to replicate the container startup scripts execute this command:
+
+  /opt/bitnami/scripts/postgresql/entrypoint.sh /opt/bitnami/scripts/postgresql/run.sh
+
+{{- else }}
+
+PostgreSQL can be accessed via port {{ include "postgresql.service.port" . }} on the following DNS names from within your cluster:
+
+  {{ include "postgresql.primary.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection
+
+{{- if eq .Values.architecture "replication" }}
+
+  {{ include "postgresql.readReplica.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read only connection
+
+{{- end }}
+
+{{- $customUser := include "postgresql.username" . }}
+{{- if and (not (empty $customUser)) (ne $customUser "postgres") .Values.auth.enablePostgresUser }}
+
+To get the password for "postgres" run:
+
+  export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.postgres-password}" | base64 --decode)
+
+To get the password for "{{ $customUser }}" run:
+
+  export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.password}" | base64 --decode)
+
+{{- else }}
+
+To get the password for "{{ default "postgres" $customUser }}" run:
+
+  export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.{{ ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres")) }}}" | base64 --decode)
+
+{{- end }}
+
+To connect to your database run the following command:
+
+  kubectl run {{ include "common.names.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ include "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" \
+    --command -- psql --host {{ include "postgresql.primary.fullname" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.service.port" . }}
+
+  > NOTE: If you access the container using bash, make sure that you execute "/opt/bitnami/scripts/entrypoint.sh /bin/bash" in order to avoid the error "psql: local user with ID {{ .Values.primary.containerSecurityContext.runAsUser }} does not exist"
+
+To connect to your database from outside the cluster execute the following commands:
+
+{{- if contains "NodePort" .Values.primary.service.type }}
+
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "postgresql.primary.fullname" .
}}) + PGPASSWORD="$POSTGRES_PASSWORD" psql --host $NODE_IP --port $NODE_PORT -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.primary.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "postgresql.primary.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "postgresql.primary.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + PGPASSWORD="$POSTGRES_PASSWORD" psql --host $SERVICE_IP --port {{ include "postgresql.service.port" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.primary.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "postgresql.primary.fullname" . }} {{ include "postgresql.service.port" . }}:{{ include "postgresql.service.port" . }} & + PGPASSWORD="$POSTGRES_PASSWORD" psql --host 127.0.0.1 -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.service.port" . }} + +{{- end }} +{{- end }} + +{{- include "postgresql.validateValues" . -}} +{{- include "common.warnings.rollingTag" .Values.image -}} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} diff --git a/ansible/roles/helm_install/files/postgresql/templates/_helpers.tpl b/ansible/roles/helm_install/files/postgresql/templates/_helpers.tpl new file mode 100644 index 0000000..98eb56e --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/_helpers.tpl @@ -0,0 +1,320 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Create a default fully qualified app name for PostgreSQL Primary objects +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.primary.fullname" -}} +{{- if eq .Values.architecture "replication" }} + {{- printf "%s-primary" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- include "common.names.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name for PostgreSQL read-only replicas objects +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.readReplica.fullname" -}} +{{- printf "%s-read" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the default FQDN for PostgreSQL primary headless service +We truncate at 63 chars because of the DNS naming spec. +*/}} +{{- define "postgresql.primary.svc.headless" -}} +{{- printf "%s-hl" (include "postgresql.primary.fullname" .) | trunc 63 | trimSuffix "-" }} +{{- end -}} + +{{/* +Create the default FQDN for PostgreSQL read-only replicas headless service +We truncate at 63 chars because of the DNS naming spec. +*/}} +{{- define "postgresql.readReplica.svc.headless" -}} +{{- printf "%s-hl" (include "postgresql.readReplica.fullname" .) 
| trunc 63 | trimSuffix "-" }} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return the name for a custom user to create +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.auth.username }} + {{- .Values.global.postgresql.auth.username -}} +{{- else -}} + {{- .Values.auth.username -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name for a custom database to create +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.auth.database }} + {{- .Values.global.postgresql.auth.database -}} +{{- else if .Values.auth.database -}} + {{- .Values.auth.database -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.auth.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.auth.existingSecret $) -}} +{{- else if .Values.auth.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if not (or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL service port +*/}} +{{- define "postgresql.service.port" -}} +{{- if .Values.global.postgresql.service.ports.postgresql }} + {{- .Values.global.postgresql.service.ports.postgresql -}} +{{- else -}} + {{- .Values.primary.service.ports.postgresql -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL service port +*/}} +{{- define "postgresql.readReplica.service.port" -}} +{{- if .Values.global.postgresql.service.ports.postgresql }} + {{- .Values.global.postgresql.service.ports.postgresql -}} +{{- else -}} + {{- .Values.readReplicas.service.ports.postgresql -}} +{{- end -}} +{{- end -}} + +{{/* +Get the PostgreSQL primary configuration ConfigMap name. +*/}} +{{- define "postgresql.primary.configmapName" -}} +{{- if .Values.primary.existingConfigmap -}} + {{- printf "%s" (tpl .Values.primary.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-configuration" (include "postgresql.primary.fullname" .) 
-}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created for PostgreSQL primary with the configuration
+*/}}
+{{- define "postgresql.primary.createConfigmap" -}}
+{{- if and (or .Values.primary.configuration .Values.primary.pgHbaConfiguration) (not .Values.primary.existingConfigmap) }}
+  {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the PostgreSQL primary extended configuration ConfigMap name.
+*/}}
+{{- define "postgresql.primary.extendedConfigmapName" -}}
+{{- if .Values.primary.existingExtendedConfigmap -}}
+  {{- printf "%s" (tpl .Values.primary.existingExtendedConfigmap $) -}}
+{{- else -}}
+  {{- printf "%s-extended-configuration" (include "postgresql.primary.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created for PostgreSQL primary with the extended configuration
+*/}}
+{{- define "postgresql.primary.createExtendedConfigmap" -}}
+{{- if and .Values.primary.extendedConfiguration (not .Values.primary.existingExtendedConfigmap) }}
+  {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+ Create the name of the service account to use
+ */}}
+{{- define "postgresql.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+  {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+  {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap should be mounted with PostgreSQL configuration
+*/}}
+{{- define "postgresql.mountConfigurationCM" -}}
+{{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the initialization scripts ConfigMap name.
+*/}}
+{{- define "postgresql.initdb.scriptsCM" -}}
+{{- if .Values.primary.initdb.scriptsConfigMap -}}
+  {{- printf "%s" (tpl .Values.primary.initdb.scriptsConfigMap $) -}}
+{{- else -}}
+  {{- printf "%s-init-scripts" (include "postgresql.primary.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the readiness probe command
+*/}}
+{{- define "postgresql.readinessProbeCommand" -}}
+{{- $customUser := include "postgresql.username" . }}
+- |
+{{- if (include "postgresql.database" .) }}
+  exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if .Values.tls.enabled }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+{{- else }}
+  exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if .Values.tls.enabled }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+{{- end }}
+{{- if contains "bitnami/" .Values.image.repository }}
+  [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "postgresql.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}}
+{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap
+*/}}
+{{- define "postgresql.validateValues.ldapConfigurationMethod" -}}
+{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }}
+postgresql: ldap.url, ldap.server
+  You cannot set both `ldap.url` and `ldap.server` at the same time.
+  Please provide a unique way to configure LDAP.
+  More info at https://www.postgresql.org/docs/current/auth-ldap.html
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of Postgresql - If PSP is enabled RBAC should be enabled too
+*/}}
+{{- define "postgresql.validateValues.psp" -}}
+{{- if and .Values.psp.create (not .Values.rbac.create) }}
+postgresql: psp.create, rbac.create
+  RBAC should be enabled if PSP is enabled in order for PSP to work.
+  More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert file.
+*/}}
+{{- define "postgresql.tlsCert" -}}
+{{- if .Values.tls.autoGenerated }}
+  {{- printf "/opt/bitnami/postgresql/certs/tls.crt" -}}
+{{- else -}}
+  {{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert key file.
+*/}}
+{{- define "postgresql.tlsCertKey" -}}
+{{- if .Values.tls.autoGenerated }}
+  {{- printf "/opt/bitnami/postgresql/certs/tls.key" -}}
+{{- else -}}
+{{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CA cert file.
+*/}}
+{{- define "postgresql.tlsCACert" -}}
+{{- if .Values.tls.autoGenerated }}
+  {{- printf "/opt/bitnami/postgresql/certs/ca.crt" -}}
+{{- else -}}
+  {{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CRL file.
+*/}}
+{{- define "postgresql.tlsCRL" -}}
+{{- if .Values.tls.crlFilename -}}
+{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS credentials secret object should be created
+*/}}
+{{- define "postgresql.createTlsSecret" -}}
+{{- if and .Values.tls.autoGenerated (not .Values.tls.certificatesSecret) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the secret containing the TLS certificates.
+*/}}
+{{- define "postgresql.tlsSecretName" -}}
+{{- if .Values.tls.autoGenerated }}
+  {{- printf "%s-crt" (include "common.names.fullname" .) -}}
+{{- else -}}
+  {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }}
+{{- end -}}
+{{- end -}}
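For orientation, with this chart's defaults (user `postgres`, no custom database, TLS disabled, container port 5432 and a `bitnami/` image), the `postgresql.readinessProbeCommand` helper above renders to roughly:

    - |
      exec pg_isready -U "postgres" -h 127.0.0.1 -p 5432
      [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]

so the pod only reports ready once `pg_isready` succeeds and one of the Bitnami `.initialized` marker files exists.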
diff --git a/ansible/roles/helm_install/files/postgresql/templates/extra-list.yaml b/ansible/roles/helm_install/files/postgresql/templates/extra-list.yaml
new file mode 100644
index 0000000..9ac65f9
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/templates/extra-list.yaml
@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" .
"context" $) }} +{{- end }} diff --git a/ansible/roles/helm_install/files/postgresql/templates/networkpolicy-egress.yaml b/ansible/roles/helm_install/files/postgresql/templates/networkpolicy-egress.yaml new file mode 100644 index 0000000..e862147 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/networkpolicy-egress.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.networkPolicy.enabled (or .Values.networkPolicy.egressRules.denyConnectionsToExternal .Values.networkPolicy.egressRules.customRules) }} +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ printf "%s-egress" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Egress + egress: + {{- if .Values.networkPolicy.egressRules.denyConnectionsToExternal }} + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - namespaceSelector: {} + {{- end }} + {{- if .Values.networkPolicy.egressRules.customRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/postgresql/templates/primary/configmap.yaml b/ansible/roles/helm_install/files/postgresql/templates/primary/configmap.yaml new file mode 100644 index 0000000..2e6019f --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/primary/configmap.yaml @@ -0,0 +1,24 @@ +{{- if (include "postgresql.primary.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- if .Values.primary.configuration }} + postgresql.conf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.configuration "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.pgHbaConfiguration }} + pg_hba.conf: | + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.pgHbaConfiguration "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/postgresql/templates/primary/extended-configmap.yaml b/ansible/roles/helm_install/files/postgresql/templates/primary/extended-configmap.yaml new file mode 100644 index 0000000..ce5ba2d --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/primary/extended-configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "postgresql.primary.createExtendedConfigmap" .) 
}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extended-configuration" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + override.conf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extendedConfiguration "context" $ ) | nindent 4 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/postgresql/templates/primary/initialization-configmap.yaml b/ansible/roles/helm_install/files/postgresql/templates/primary/initialization-configmap.yaml new file mode 100644 index 0000000..b73d69b --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/primary/initialization-configmap.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.primary.initdb.scripts (not .Values.primary.initdb.scriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.primary.initdb.scripts "context" .) | nindent 2 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/postgresql/templates/primary/metrics-configmap.yaml b/ansible/roles/helm_install/files/postgresql/templates/primary/metrics-configmap.yaml new file mode 100644 index 0000000..952f3a7 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/primary/metrics-configmap.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-metrics" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{- toYaml .Values.metrics.customMetrics | nindent 4 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/postgresql/templates/primary/metrics-svc.yaml b/ansible/roles/helm_install/files/postgresql/templates/primary/metrics-svc.yaml new file mode 100644 index 0000000..8b6af03 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/primary/metrics-svc.yaml @@ -0,0 +1,31 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.service.sessionAffinity }} + {{- if .Values.metrics.service.clusterIP }} + clusterIP: {{ .Values.metrics.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.ports.metrics }} + targetPort: http-metrics + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary +{{- end }} diff --git a/ansible/roles/helm_install/files/postgresql/templates/primary/networkpolicy.yaml b/ansible/roles/helm_install/files/postgresql/templates/primary/networkpolicy.yaml new file mode 100644 index 0000000..d6f8e14 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/primary/networkpolicy.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.networkPolicy.enabled (or .Values.networkPolicy.metrics.enabled .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled) }} +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ printf "%s-ingress" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }}
+      app.kubernetes.io/component: primary
+  ingress:
+    {{- if and .Values.metrics.enabled .Values.networkPolicy.metrics.enabled (or .Values.networkPolicy.metrics.namespaceSelector .Values.networkPolicy.metrics.podSelector) }}
+    - from:
+        {{- if .Values.networkPolicy.metrics.namespaceSelector }}
+        - namespaceSelector:
+            matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.metrics.namespaceSelector "context" $) | nindent 14 }}
+        {{- end }}
+        {{- if .Values.networkPolicy.metrics.podSelector }}
+        - podSelector:
+            matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.metrics.podSelector "context" $) | nindent 14 }}
+        {{- end }}
+      ports:
+        - port: {{ .Values.metrics.containerPorts.metrics }}
+    {{- end }}
+    {{- if and .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled (or .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector) }}
+    - from:
+        {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector }}
+        - namespaceSelector:
+            matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector "context" $) | nindent 14 }}
+        {{- end }}
+        {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector }}
+        - podSelector:
+            matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector "context" $) | nindent 14 }}
+        {{- end }}
+      ports:
+        - port: {{ .Values.containerPorts.postgresql }}
+    {{- end }}
+    {{- if and .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled (eq .Values.architecture "replication") }}
+    - from:
+        - podSelector:
+            matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }}
+              app.kubernetes.io/component: read
+      ports:
+        - port: {{ .Values.containerPorts.postgresql }}
+    {{- end }}
+    {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules "context" $) | nindent 4 }}
+    {{- end }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/postgresql/templates/primary/prometheusrule.yaml b/ansible/roles/helm_install/files/postgresql/templates/primary/prometheusrule.yaml
new file mode 100644
index 0000000..92a8d77
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/templates/primary/prometheusrule.yaml
@@ -0,0 +1,22 @@
+{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: {{ include "postgresql.primary.fullname" . }}
+  namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: metrics
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.metrics.prometheusRule.labels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.labels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  groups:
+    - name: {{ include "postgresql.primary.fullname" . }}
+      rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 8 }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/postgresql/templates/primary/servicemonitor.yaml b/ansible/roles/helm_install/files/postgresql/templates/primary/servicemonitor.yaml
new file mode 100644
index 0000000..ae545ca
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/templates/primary/servicemonitor.yaml
@@ -0,0 +1,48 @@
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "postgresql.primary.fullname" . }}
+  namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: metrics
+    {{- if .Values.metrics.serviceMonitor.labels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if .Values.metrics.serviceMonitor.jobLabel }}
+  jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }}
+  {{- end }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      {{- if .Values.metrics.serviceMonitor.selector }}
+      {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }}
+      {{- end }}
+      app.kubernetes.io/component: metrics
+  endpoints:
+    - port: http-metrics
+      {{- if .Values.metrics.serviceMonitor.interval }}
+      interval: {{ .Values.metrics.serviceMonitor.interval }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+      scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.relabelings }}
+      relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.metricRelabelings }}
+      metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.honorLabels }}
+      honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
+      {{- end }}
+  namespaceSelector:
+    matchNames:
+      - {{ .Release.Namespace }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/postgresql/templates/primary/statefulset.yaml b/ansible/roles/helm_install/files/postgresql/templates/primary/statefulset.yaml
new file mode 100644
index 0000000..6827f85
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/templates/primary/statefulset.yaml
@@ -0,0 +1,639 @@
+{{- $customUser := include "postgresql.username" . }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+  name: {{ include "postgresql.primary.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: primary
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.primary.labels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.primary.labels "context" $ ) | nindent 4 }}
+    {{- end }}
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.primary.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.primary.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+spec:
+  replicas: 1
+  serviceName: {{ include "postgresql.primary.svc.headless" . }}
+  {{- if .Values.primary.updateStrategy }}
+  updateStrategy: {{- toYaml .Values.primary.updateStrategy | nindent 4 }}
+  {{- end }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      app.kubernetes.io/component: primary
+  template:
+    metadata:
+      name: {{ include "postgresql.primary.fullname" . }}
+      labels: {{- include "common.labels.standard" . | nindent 8 }}
+        app.kubernetes.io/component: primary
+        {{- if .Values.commonLabels }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }}
+        {{- end }}
+        {{- if .Values.primary.podLabels }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podLabels "context" $ ) | nindent 8 }}
+        {{- end }}
+      annotations:
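+        # The checksum annotations below roll the primary pod automatically whenever the rendered configuration ConfigMaps change.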
+        {{- if (include "postgresql.primary.createConfigmap" .) }}
+        checksum/configuration: {{ include (print $.Template.BasePath "/primary/configmap.yaml") . | sha256sum }}
+        {{- end }}
+        {{- if (include "postgresql.primary.createExtendedConfigmap" .) }}
+        checksum/extended-configuration: {{ include (print $.Template.BasePath "/primary/extended-configmap.yaml") . | sha256sum }}
+        {{- end }}
+        {{- if .Values.primary.podAnnotations }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podAnnotations "context" $ ) | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- if .Values.primary.extraPodSpec }}
+      {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraPodSpec "context" $) | nindent 6 }}
+      {{- end }}
+      serviceAccountName: {{ include "postgresql.serviceAccountName" . }}
+      {{- include "postgresql.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.primary.hostAliases }}
+      hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.primary.hostAliases "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.primary.affinity }}
+      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.primary.affinity "context" $) | nindent 8 }}
+      {{- else }}
+      affinity:
+        podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAffinityPreset "component" "primary" "context" $) | nindent 10 }}
+        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAntiAffinityPreset "component" "primary" "context" $) | nindent 10 }}
+        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.primary.nodeAffinityPreset.type "key" .Values.primary.nodeAffinityPreset.key "values" .Values.primary.nodeAffinityPreset.values) | nindent 10 }}
+      {{- end }}
+      {{- if .Values.primary.nodeSelector }}
+      nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.primary.tolerations }}
+      tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.primary.topologySpreadConstraints }}
+      topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.primary.topologySpreadConstraints "context" .) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.primary.priorityClassName }}
+      priorityClassName: {{ .Values.primary.priorityClassName }}
+      {{- end }}
+      {{- if .Values.primary.schedulerName }}
+      schedulerName: {{ .Values.primary.schedulerName | quote }}
+      {{- end }}
+      {{- if .Values.primary.terminationGracePeriodSeconds }}
+      terminationGracePeriodSeconds: {{ .Values.primary.terminationGracePeriodSeconds }}
+      {{- end }}
+      {{- if .Values.primary.podSecurityContext.enabled }}
+      securityContext: {{- omit .Values.primary.podSecurityContext "enabled" | toYaml | nindent 8 }}
+      {{- end }}
+      hostNetwork: {{ .Values.primary.hostNetwork }}
+      hostIPC: {{ .Values.primary.hostIPC }}
+      initContainers:
+        {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }}
+        - name: copy-certs
+          image: {{ include "postgresql.volumePermissions.image" . }}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          {{- if .Values.primary.resources }}
+          resources: {{- toYaml .Values.primary.resources | nindent 12 }}
+          {{- end }}
+          # We don't require a privileged container in this case
+          {{- if .Values.primary.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          command:
+            - /bin/sh
+            - -ec
+            - |
+              cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+              chmod 600 {{ include "postgresql.tlsCertKey" . }}
+          volumeMounts:
+            - name: raw-certificates
+              mountPath: /tmp/certs
+            - name: postgresql-certificates
+              mountPath: /opt/bitnami/postgresql/certs
+        {{- else if and .Values.volumePermissions.enabled (or .Values.primary.persistence.enabled .Values.shmVolume.enabled) }}
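+        # init-chmod-data fixes ownership and permissions of the data dir (and /dev/shm and TLS certs when enabled) before PostgreSQL starts; with runAsUser "auto" the owner is resolved at runtime from the container's own UID.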
+        - name: init-chmod-data
+          image: {{ include "postgresql.volumePermissions.image" . }}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          {{- if .Values.volumePermissions.resources }}
+          resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+          {{- end }}
+          command:
+            - /bin/sh
+            - -ec
+            - |
+              {{- if .Values.primary.persistence.enabled }}
+              {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+              chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.primary.persistence.mountPath }}
+              {{- else }}
+              chown {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} {{ .Values.primary.persistence.mountPath }}
+              {{- end }}
+              mkdir -p {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }}
+              chmod 700 {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }}
+              find {{ .Values.primary.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \
+              {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+                xargs -r chown -R `id -u`:`id -G | cut -d " " -f2`
+              {{- else }}
+                xargs -r chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }}
+              {{- end }}
+              {{- end }}
+              {{- if .Values.shmVolume.enabled }}
+              chmod -R 777 /dev/shm
+              {{- end }}
+              {{- if .Values.tls.enabled }}
+              cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+              {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+              chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/
+              {{- else }}
+              chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/
+              {{- end }}
+              chmod 600 {{ include "postgresql.tlsCertKey" . }}
+              {{- end }}
+          {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+          securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }}
+          {{- else }}
+          securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.primary.persistence.enabled }}
+            - name: data
+              mountPath: {{ .Values.primary.persistence.mountPath }}
+              {{- if .Values.primary.persistence.subPath }}
+              subPath: {{ .Values.primary.persistence.subPath }}
+              {{- end }}
+            {{- end }}
+            {{- if .Values.shmVolume.enabled }}
+            - name: dshm
+              mountPath: /dev/shm
+            {{- end }}
+            {{- if .Values.tls.enabled }}
+            - name: raw-certificates
+              mountPath: /tmp/certs
+            - name: postgresql-certificates
+              mountPath: /opt/bitnami/postgresql/certs
+            {{- end }}
+        {{- end }}
+        {{- if .Values.primary.initContainers }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.primary.initContainers "context" $ ) | nindent 8 }}
+        {{- end }}
+      containers:
+        - name: postgresql
+          image: {{ include "postgresql.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.primary.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+          {{- else if .Values.primary.command }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.primary.command "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- else if .Values.primary.args }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.primary.args "context" $) | nindent 12 }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+            - name: POSTGRESQL_PORT_NUMBER
+              value: {{ .Values.containerPorts.postgresql | quote }}
+            - name: POSTGRESQL_VOLUME_DIR
+              value: {{ .Values.primary.persistence.mountPath | quote }}
+            {{- if .Values.primary.persistence.mountPath }}
+            - name: PGDATA
+              value: {{ .Values.postgresqlDataDir | quote }}
+            {{- end }}
+            # Authentication
+            {{- if and (not (empty $customUser)) (ne $customUser "postgres") }}
+            - name: POSTGRES_USER
+              value: {{ $customUser | quote }}
+            {{- if .Values.auth.enablePostgresUser }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: POSTGRES_POSTGRES_PASSWORD_FILE
+              value: "/opt/bitnami/postgresql/secrets/postgres-password"
+            {{- else }}
+            - name: POSTGRES_POSTGRES_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: postgres-password
+            {{- end }}
+            {{- end }}
+            {{- end }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: POSTGRES_PASSWORD_FILE
+              value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }}
+            {{- else }}
+            - name: POSTGRES_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: {{ ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres")) }}
+            {{- end }}
+            {{- if (include "postgresql.database" .) }}
+            - name: POSTGRES_DB
+              value: {{ (include "postgresql.database" .) | quote }}
+            {{- end }}
+            # Replication
+            {{- if or (eq .Values.architecture "replication") .Values.primary.standby.enabled }}
+            - name: POSTGRES_REPLICATION_MODE
+              value: {{ ternary "slave" "master" .Values.primary.standby.enabled | quote }}
+            - name: POSTGRES_REPLICATION_USER
+              value: {{ .Values.auth.replicationUsername | quote }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: POSTGRES_REPLICATION_PASSWORD_FILE
+              value: "/opt/bitnami/postgresql/secrets/replication-password"
+            {{- else }}
+            - name: POSTGRES_REPLICATION_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: replication-password
+            {{- end }}
+            {{- if not (eq .Values.replication.synchronousCommit "off") }}
+            - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE
+              value: {{ .Values.replication.synchronousCommit | quote }}
+            - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS
+              value: {{ .Values.replication.numSynchronousReplicas | quote }}
+            {{- end }}
+            - name: POSTGRES_CLUSTER_APP_NAME
+              value: {{ .Values.replication.applicationName }}
+            {{- end }}
+            # Initdb
+            {{- if .Values.primary.initdb.args }}
+            - name: POSTGRES_INITDB_ARGS
+              value: {{ .Values.primary.initdb.args | quote }}
+            {{- end }}
+            {{- if .Values.primary.initdb.postgresqlWalDir }}
+            - name: POSTGRES_INITDB_WALDIR
+              value: {{ .Values.primary.initdb.postgresqlWalDir | quote }}
+            {{- end }}
+            {{- if .Values.primary.initdb.user }}
+            - name: POSTGRESQL_INITSCRIPTS_USERNAME
+              value: {{ .Values.primary.initdb.user }}
+            {{- end }}
+            {{- if .Values.primary.initdb.password }}
+            - name: POSTGRESQL_INITSCRIPTS_PASSWORD
+              value: {{ .Values.primary.initdb.password | quote }}
+            {{- end }}
+            # Standby
+            {{- if .Values.primary.standby.enabled }}
+            - name: POSTGRES_MASTER_HOST
+              value: {{ .Values.primary.standby.primaryHost }}
+            - name: POSTGRES_MASTER_PORT_NUMBER
+              value: {{ .Values.primary.standby.primaryPort | quote }}
+            {{- end }}
+            # LDAP
+            - name: POSTGRESQL_ENABLE_LDAP
+              value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }}
+            {{- if .Values.ldap.enabled }}
+            - name: POSTGRESQL_LDAP_SERVER
+              value: {{ .Values.ldap.server }}
+            - name: POSTGRESQL_LDAP_PORT
+              value: {{ .Values.ldap.port | quote }}
+            - name: POSTGRESQL_LDAP_SCHEME
+              value: {{ .Values.ldap.scheme }}
+            {{- if .Values.ldap.tls }}
+            - name: POSTGRESQL_LDAP_TLS
+              value: "1"
+            {{- end }}
+            - name: POSTGRESQL_LDAP_PREFIX
+              value: {{ .Values.ldap.prefix | quote }}
+            - name: POSTGRESQL_LDAP_SUFFIX
+              value: {{ .Values.ldap.suffix | quote }}
+            - name: POSTGRESQL_LDAP_BASE_DN
+              value: {{ .Values.ldap.baseDN }}
+            - name: POSTGRESQL_LDAP_BIND_DN
+              value: {{ .Values.ldap.bindDN }}
+            {{- if not (empty .Values.ldap.bind_password) }}
+            - name: POSTGRESQL_LDAP_BIND_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: ldap-password
+            {{- end }}
+            - name: POSTGRESQL_LDAP_SEARCH_ATTR
+              value: {{ .Values.ldap.search_attr }}
+            - name: POSTGRESQL_LDAP_SEARCH_FILTER
+              value: {{ .Values.ldap.search_filter }}
+            - name: POSTGRESQL_LDAP_URL
+              value: {{ .Values.ldap.url }}
+            {{- end }}
+            # TLS
+            - name: POSTGRESQL_ENABLE_TLS
+              value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+            {{- if .Values.tls.enabled }}
+            - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS
+              value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }}
+            - name: POSTGRESQL_TLS_CERT_FILE
+              value: {{ include "postgresql.tlsCert" . }}
+            - name: POSTGRESQL_TLS_KEY_FILE
+              value: {{ include "postgresql.tlsCertKey" . }}
+            {{- if .Values.tls.certCAFilename }}
+            - name: POSTGRESQL_TLS_CA_FILE
+              value: {{ include "postgresql.tlsCACert" . }}
+            {{- end }}
+            {{- if .Values.tls.crlFilename }}
+            - name: POSTGRESQL_TLS_CRL_FILE
+              value: {{ include "postgresql.tlsCRL" . }}
+            {{- end }}
+            {{- end }}
+            # Audit
+            - name: POSTGRESQL_LOG_HOSTNAME
+              value: {{ .Values.audit.logHostname | quote }}
+            - name: POSTGRESQL_LOG_CONNECTIONS
+              value: {{ .Values.audit.logConnections | quote }}
+            - name: POSTGRESQL_LOG_DISCONNECTIONS
+              value: {{ .Values.audit.logDisconnections | quote }}
+            {{- if .Values.audit.logLinePrefix }}
+            - name: POSTGRESQL_LOG_LINE_PREFIX
+              value: {{ .Values.audit.logLinePrefix | quote }}
+            {{- end }}
+            {{- if .Values.audit.logTimezone }}
+            - name: POSTGRESQL_LOG_TIMEZONE
+              value: {{ .Values.audit.logTimezone | quote }}
+            {{- end }}
+            {{- if .Values.audit.pgAuditLog }}
+            - name: POSTGRESQL_PGAUDIT_LOG
+              value: {{ .Values.audit.pgAuditLog | quote }}
+            {{- end }}
+            - name: POSTGRESQL_PGAUDIT_LOG_CATALOG
+              value: {{ .Values.audit.pgAuditLogCatalog | quote }}
+            # Others
+            - name: POSTGRESQL_CLIENT_MIN_MESSAGES
+              value: {{ .Values.audit.clientMinMessages | quote }}
+            - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
+              value: {{ .Values.postgresqlSharedPreloadLibraries | quote }}
+            {{- if .Values.primary.extraEnvVars }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVars "context" $) | nindent 12 }}
+            {{- end }}
+          {{- if or .Values.primary.extraEnvVarsCM .Values.primary.extraEnvVarsSecret }}
+          envFrom:
+            {{- if .Values.primary.extraEnvVarsCM }}
+            - configMapRef:
+                name: {{ .Values.primary.extraEnvVarsCM }}
+            {{- end }}
+            {{- if .Values.primary.extraEnvVarsSecret }}
+            - secretRef:
+                name: {{ .Values.primary.extraEnvVarsSecret }}
+            {{- end }}
+          {{- end }}
+          ports:
+            - name: tcp-postgresql
+              containerPort: {{ .Values.containerPorts.postgresql }}
+          {{- if not .Values.diagnosticMode.enabled }}
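+          # The default probes run pg_isready against 127.0.0.1; when TLS and a CA file are configured, the client cert/key are passed so the check follows the same path as real clients.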
+          {{- if .Values.primary.startupProbe.enabled }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.startupProbe "enabled") "context" $) | nindent 12 }}
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                {{- if (include "postgresql.database" .) }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- else }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- end }}
+          {{- else if .Values.primary.customStartupProbe }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customStartupProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.primary.livenessProbe.enabled }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.livenessProbe "enabled") "context" $) | nindent 12 }}
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                {{- if (include "postgresql.database" .) }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- else }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- end }}
+          {{- else if .Values.primary.customLivenessProbe }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customLivenessProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.primary.readinessProbe.enabled }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.readinessProbe "enabled") "context" $) | nindent 12 }}
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                - -e
+                {{- include "postgresql.readinessProbeCommand" . | nindent 16 }}
+          {{- else if .Values.primary.customReadinessProbe }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customReadinessProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- end }}
+          {{- if .Values.primary.resources }}
+          resources: {{- toYaml .Values.primary.resources | nindent 12 }}
+          {{- end }}
+          {{- if .Values.primary.lifecycleHooks }}
+          lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.primary.lifecycleHooks "context" $) | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }}
+            - name: custom-init-scripts
+              mountPath: /docker-entrypoint-initdb.d/
+            {{- end }}
+            {{- if .Values.primary.initdb.scriptsSecret }}
+            - name: custom-init-scripts-secret
+              mountPath: /docker-entrypoint-initdb.d/secret
+            {{- end }}
+            {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }}
+            - name: postgresql-extended-config
+              mountPath: /bitnami/postgresql/conf/conf.d/
+            {{- end }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: postgresql-password
+              mountPath: /opt/bitnami/postgresql/secrets/
+            {{- end }}
+            {{- if .Values.tls.enabled }}
+            - name: postgresql-certificates
+              mountPath: /opt/bitnami/postgresql/certs
+              readOnly: true
+            {{- end }}
+            {{- if .Values.shmVolume.enabled }}
+            - name: dshm
+              mountPath: /dev/shm
+            {{- end }}
+            {{- if .Values.primary.persistence.enabled }}
+            - name: data
+              mountPath: {{ .Values.primary.persistence.mountPath }}
+              {{- if .Values.primary.persistence.subPath }}
+              subPath: {{ .Values.primary.persistence.subPath }}
+              {{- end }}
+            {{- end }}
+            {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }}
+            - name: postgresql-config
+              mountPath: /bitnami/postgresql/conf
+            {{- end }}
+            {{- if .Values.primary.extraVolumeMounts }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumeMounts "context" $) | nindent 12 }}
+            {{- end }}
+        {{- if .Values.metrics.enabled }}
+        - name: metrics
+          image: {{ include "postgresql.metrics.image" . }}
+          imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+          {{- if .Values.metrics.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- else if .Values.metrics.customMetrics }}
+          args: ["--extend.query-path", "/conf/custom-metrics.yaml"]
+          {{- end }}
+          env:
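+            # postgres_exporter is configured through the DATA_SOURCE_* env vars: a key/value DSN carrying the client cert when TLS with a CA is enabled, otherwise a host:port/database URI; credentials come from the chart secret or a mounted password file.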
+            {{- $database := required "In order to enable metrics you need to specify a database (.Values.auth.database or .Values.global.postgresql.auth.database)" (include "postgresql.database" .) }}
+            {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }}
+            {{- if and .Values.tls.enabled .Values.tls.certCAFilename }}
+            - name: DATA_SOURCE_NAME
+              value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.service.port" .)) (default "postgres" $customUser | quote) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }}
+            {{- else }}
+            - name: DATA_SOURCE_URI
+              value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.service.port" .)) $database $sslmode }}
+            {{- end }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: DATA_SOURCE_PASS_FILE
+              value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }}
+            {{- else }}
+            - name: DATA_SOURCE_PASS
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: {{ ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres")) }}
+            {{- end }}
+            - name: DATA_SOURCE_USER
+              value: {{ default "postgres" $customUser | quote }}
+            {{- if .Values.metrics.extraEnvVars }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }}
+            {{- end }}
+          ports:
+            - name: http-metrics
+              containerPort: {{ .Values.metrics.containerPorts.metrics }}
+          {{- if not .Values.diagnosticMode.enabled }}
+          {{- if .Values.metrics.startupProbe.enabled }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }}
+            tcpSocket:
+              port: http-metrics
+          {{- else if .Values.metrics.customStartupProbe }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.metrics.livenessProbe.enabled }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }}
+            httpGet:
+              path: /
+              port: http-metrics
+          {{- else if .Values.metrics.customLivenessProbe }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.metrics.readinessProbe.enabled }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }}
+            httpGet:
+              path: /
+              port: http-metrics
+          {{- else if .Values.metrics.customReadinessProbe }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: postgresql-password
+              mountPath: /opt/bitnami/postgresql/secrets/
+            {{- end }}
+            {{- if .Values.tls.enabled }}
+            - name: postgresql-certificates
+              mountPath: /opt/bitnami/postgresql/certs
+              readOnly: true
+            {{- end }}
+            {{- if .Values.metrics.customMetrics }}
+            - name: custom-metrics
+              mountPath: /conf
+              readOnly: true
+            {{- end }}
+          {{- if .Values.metrics.resources }}
+          resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+          {{- end }}
+        {{- end }}
+        {{- if .Values.primary.sidecars }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.primary.sidecars "context" $ ) | nindent 8 }}
+        {{- end }}
+      volumes:
+        {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }}
+        - name: postgresql-config
+          configMap:
+            name: {{ include "postgresql.primary.configmapName" . }}
+        {{- end }}
+        {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }}
+        - name: postgresql-extended-config
+          configMap:
+            name: {{ include "postgresql.primary.extendedConfigmapName" . }}
+        {{- end }}
+        {{- if .Values.auth.usePasswordFiles }}
+        - name: postgresql-password
+          secret:
+            secretName: {{ include "postgresql.secretName" . }}
+        {{- end }}
+        {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }}
+        - name: custom-init-scripts
+          configMap:
+            name: {{ include "postgresql.initdb.scriptsCM" . }}
+        {{- end }}
+        {{- if .Values.primary.initdb.scriptsSecret }}
+        - name: custom-init-scripts-secret
+          secret:
+            secretName: {{ tpl .Values.primary.initdb.scriptsSecret $ }}
+        {{- end }}
+        {{- if .Values.tls.enabled }}
+        - name: raw-certificates
+          secret:
+            secretName: {{ include "postgresql.tlsSecretName" . }}
+        - name: postgresql-certificates
+          emptyDir: {}
+        {{- end }}
+        {{- if .Values.primary.extraVolumes }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extraVolumes "context" $ ) | nindent 8 }}
+        {{- end }}
+        {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
+        - name: custom-metrics
+          configMap:
+            name: {{ printf "%s-metrics" (include "common.names.fullname" .) }}
+        {{- end }}
+        {{- if .Values.shmVolume.enabled }}
+        - name: dshm
+          emptyDir:
+            medium: Memory
+            {{- if .Values.shmVolume.sizeLimit }}
+            sizeLimit: {{ .Values.shmVolume.sizeLimit }}
+            {{- end }}
+        {{- end }}
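+        # Data volume resolution: reuse an existing PVC when one is named, use an emptyDir when persistence is disabled, otherwise create one PVC per replica via volumeClaimTemplates below.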
"context" $) | nindent 12 }} + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.primary.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.sidecars "context" $ ) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }} + - name: postgresql-config + configMap: + name: {{ include "postgresql.primary.configmapName" . }} + {{- end }} + {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }} + - name: postgresql-extended-config + configMap: + name: {{ include "postgresql.primary.extendedConfigmapName" . }} + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + secret: + secretName: {{ include "postgresql.secretName" . }} + {{- end }} + {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }} + - name: custom-init-scripts + configMap: + name: {{ include "postgresql.initdb.scriptsCM" . }} + {{- end }} + {{- if .Values.primary.initdb.scriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ tpl .Values.primary.initdb.scriptsSecret $ }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ include "postgresql.tlsSecretName" . }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.primary.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + {{- if .Values.shmVolume.sizeLimit }} + sizeLimit: {{ .Values.shmVolume.sizeLimit }} + {{- end }} + {{- end }} + {{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ tpl .Values.primary.persistence.existingClaim $ }} + {{- else if not .Values.primary.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + {{- if .Values.primary.persistence.annotations }} + annotations: {{- toYaml .Values.primary.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.primary.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + {{- if .Values.primary.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + resources: + requests: + storage: {{ .Values.primary.persistence.size | quote }} + {{- if .Values.primary.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.primary.persistence "global" .Values.global) | nindent 8 }} + {{- end }} diff --git a/ansible/roles/helm_install/files/postgresql/templates/primary/svc-headless.yaml b/ansible/roles/helm_install/files/postgresql/templates/primary/svc-headless.yaml new file mode 100644 index 0000000..abd5aba --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/primary/svc-headless.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "postgresql.primary.svc.headless" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: primary + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + # Use this annotation in addition to the actual publishNotReadyAddresses + # field below because the annotation will stop being respected soon but the + # field is broken in some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + type: ClusterIP + clusterIP: None + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other Postgresql pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + ports: + - name: tcp-postgresql + port: {{ template "postgresql.service.port" . }} + targetPort: tcp-postgresql + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/ansible/roles/helm_install/files/postgresql/templates/primary/svc.yaml b/ansible/roles/helm_install/files/postgresql/templates/primary/svc.yaml new file mode 100644 index 0000000..b7784a4 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/primary/svc.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "postgresql.primary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: primary + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.service.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.primary.service.type }} + {{- if and .Values.primary.service.loadBalancerIP (eq .Values.primary.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.primary.service.loadBalancerIP }} + externalTrafficPolicy: {{ .Values.primary.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.primary.service.type "LoadBalancer") .Values.primary.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.loadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq .Values.primary.service.type "ClusterIP") .Values.primary.service.clusterIP }} + clusterIP: {{ .Values.primary.service.clusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.service.port" . }} + targetPort: tcp-postgresql + {{- if and (or (eq .Values.primary.service.type "NodePort") (eq .Values.primary.service.type "LoadBalancer")) (not (empty .Values.primary.service.nodePorts.postgresql)) }} + nodePort: {{ .Values.primary.service.nodePorts.postgresql }} + {{- else if eq .Values.primary.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.primary.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/ansible/roles/helm_install/files/postgresql/templates/psp.yaml b/ansible/roles/helm_install/files/postgresql/templates/psp.yaml new file mode 100644 index 0000000..d164079 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/psp.yaml @@ -0,0 +1,41 @@ +{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- if and $pspAvailable .Values.psp.create }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
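+# PodSecurityPolicy was removed in Kubernetes 1.25, so this resource is only rendered on older clusters (hence the version gate below).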
+{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- if and $pspAvailable .Values.psp.create }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  privileged: false
+  volumes:
+    - 'configMap'
+    - 'secret'
+    - 'persistentVolumeClaim'
+    - 'emptyDir'
+    - 'projected'
+  hostNetwork: false
+  hostIPC: false
+  hostPID: false
+  runAsUser:
+    rule: 'RunAsAny'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'MustRunAs'
+    ranges:
+      - min: 1
+        max: 65535
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+      - min: 1
+        max: 65535
+  readOnlyRootFilesystem: false
+{{- end }}
diff --git a/ansible/roles/helm_install/files/postgresql/templates/read/networkpolicy.yaml b/ansible/roles/helm_install/files/postgresql/templates/read/networkpolicy.yaml
new file mode 100644
index 0000000..e49f020
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/templates/read/networkpolicy.yaml
@@ -0,0 +1,36 @@
+{{- if and .Values.networkPolicy.enabled (eq .Values.architecture "replication") .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled }}
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+  name: {{ printf "%s-ingress" (include "postgresql.readReplica.fullname" .) }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: read
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  podSelector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      app.kubernetes.io/component: read
+  ingress:
+    {{- if and .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled (or .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector) }}
+    - from:
+        {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector }}
+        - namespaceSelector:
+            matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector "context" $) | nindent 14 }}
+        {{- end }}
+        {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector }}
+        - podSelector:
+            matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector "context" $) | nindent 14 }}
+        {{- end }}
+      ports:
+        - port: {{ .Values.containerPorts.postgresql }}
+    {{- end }}
+    {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules "context" $) | nindent 4 }}
+    {{- end }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/postgresql/templates/read/statefulset.yaml b/ansible/roles/helm_install/files/postgresql/templates/read/statefulset.yaml
new file mode 100644
index 0000000..2fbdf90
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/templates/read/statefulset.yaml
@@ -0,0 +1,433 @@
+{{- if eq .Values.architecture "replication" }}
+{{- $customUser := include "postgresql.username" . }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+  name: {{ include "postgresql.readReplica.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: read
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.readReplicas.labels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.labels "context" $ ) | nindent 4 }}
+    {{- end }}
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.readReplicas.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+spec:
+  replicas: {{ .Values.readReplicas.replicaCount }}
+  serviceName: {{ include "postgresql.readReplica.svc.headless" . }}
+  {{- if .Values.readReplicas.updateStrategy }}
+  updateStrategy: {{- toYaml .Values.readReplicas.updateStrategy | nindent 4 }}
+  {{- end }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      app.kubernetes.io/component: read
+  template:
+    metadata:
+      name: {{ include "postgresql.readReplica.fullname" . }}
+      labels: {{- include "common.labels.standard" . | nindent 8 }}
+        app.kubernetes.io/component: read
+        {{- if .Values.commonLabels }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }}
+        {{- end }}
+        {{- if .Values.readReplicas.podLabels }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.podLabels "context" $ ) | nindent 8 }}
+        {{- end }}
+      annotations:
+        {{- if .Values.readReplicas.podAnnotations }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.podAnnotations "context" $ ) | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- if .Values.readReplicas.extraPodSpec }}
+      {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraPodSpec "context" $) | nindent 6 }}
+      {{- end }}
+      serviceAccountName: {{ include "postgresql.serviceAccountName" . }}
+      {{- include "postgresql.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.readReplicas.hostAliases }}
+      hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.hostAliases "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.readReplicas.affinity }}
+      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.affinity "context" $) | nindent 8 }}
+      {{- else }}
+      affinity:
+        podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAffinityPreset "component" "read" "context" $) | nindent 10 }}
+        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAntiAffinityPreset "component" "read" "context" $) | nindent 10 }}
+        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.readReplicas.nodeAffinityPreset.type "key" .Values.readReplicas.nodeAffinityPreset.key "values" .Values.readReplicas.nodeAffinityPreset.values) | nindent 10 }}
+      {{- end }}
+      {{- if .Values.readReplicas.nodeSelector }}
+      nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.readReplicas.tolerations }}
+      tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.readReplicas.topologySpreadConstraints }}
+      topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.topologySpreadConstraints "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.readReplicas.priorityClassName }}
+      priorityClassName: {{ .Values.readReplicas.priorityClassName }}
+      {{- end }}
+      {{- if .Values.readReplicas.schedulerName }}
+      schedulerName: {{ .Values.readReplicas.schedulerName | quote }}
+      {{- end }}
+      {{- if .Values.readReplicas.terminationGracePeriodSeconds }}
+      terminationGracePeriodSeconds: {{ .Values.readReplicas.terminationGracePeriodSeconds }}
+      {{- end }}
+      {{- if .Values.readReplicas.podSecurityContext.enabled }}
+      securityContext: {{- omit .Values.readReplicas.podSecurityContext "enabled" | toYaml | nindent 8 }}
+      {{- end }}
+      hostNetwork: {{ .Values.readReplicas.hostNetwork }}
+      hostIPC: {{ .Values.readReplicas.hostIPC }}
+      initContainers:
+        {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }}
+        - name: copy-certs
+          image: {{ include "postgresql.volumePermissions.image" . }}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          {{- if .Values.readReplicas.resources }}
+          resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+          {{- end }}
+          # We don't require a privileged container in this case
+          {{- if .Values.readReplicas.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.readReplicas.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          command:
+            - /bin/sh
+            - -ec
+            - |
+              cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+              chmod 600 {{ include "postgresql.tlsCertKey" . }}
+          volumeMounts:
+            - name: raw-certificates
+              mountPath: /tmp/certs
+            - name: postgresql-certificates
+              mountPath: /opt/bitnami/postgresql/certs
+        {{- else if and .Values.volumePermissions.enabled (or .Values.readReplicas.persistence.enabled .Values.shmVolume.enabled) }}
+        - name: init-chmod-data
+          image: {{ include "postgresql.volumePermissions.image" . }}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          {{- if .Values.readReplicas.resources }}
+          resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+          {{- end }}
+          command:
+            - /bin/sh
+            - -ec
+            - |
+              {{- if .Values.readReplicas.persistence.enabled }}
+              {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+              chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.readReplicas.persistence.mountPath }}
+              {{- else }}
+              chown {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} {{ .Values.readReplicas.persistence.mountPath }}
+              {{- end }}
+              mkdir -p {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }}
+              chmod 700 {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }}
+              find {{ .Values.readReplicas.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \
+              {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+                xargs -r chown -R `id -u`:`id -G | cut -d " " -f2`
+              {{- else }}
+                xargs -r chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }}
+              {{- end }}
+              {{- end }}
+              {{- if .Values.shmVolume.enabled }}
+              chmod -R 777 /dev/shm
+              {{- end }}
+              {{- if .Values.tls.enabled }}
+              cp /tmp/certs/* /opt/bitnami/postgresql/certs/
+              {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+              chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/
+              {{- else }}
+              chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/
+              {{- end }}
+              chmod 600 {{ include "postgresql.tlsCertKey" . }}
+              {{- end }}
+          {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+          securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }}
+          {{- else }}
+          securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.readReplicas.persistence.enabled }}
+            - name: data
+              mountPath: {{ .Values.readReplicas.persistence.mountPath }}
+              {{- if .Values.readReplicas.persistence.subPath }}
+              subPath: {{ .Values.readReplicas.persistence.subPath }}
+              {{- end }}
+            {{- end }}
+            {{- if .Values.shmVolume.enabled }}
+            - name: dshm
+              mountPath: /dev/shm
+            {{- end }}
+            {{- if .Values.tls.enabled }}
+            - name: raw-certificates
+              mountPath: /tmp/certs
+            - name: postgresql-certificates
+              mountPath: /opt/bitnami/postgresql/certs
+            {{- end }}
+        {{- end }}
+        {{- if .Values.readReplicas.initContainers }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.initContainers "context" $ ) | nindent 8 }}
+        {{- end }}
+      containers:
+        - name: postgresql
+          image: {{ include "postgresql.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.readReplicas.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.readReplicas.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+          {{- else if .Values.readReplicas.command }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.command "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- else if .Values.readReplicas.args }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.args "context" $) | nindent 12 }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+            - name: POSTGRESQL_PORT_NUMBER
+              value: {{ .Values.containerPorts.postgresql | quote }}
+            - name: POSTGRESQL_VOLUME_DIR
+              value: {{ .Values.readReplicas.persistence.mountPath | quote }}
+            {{- if .Values.readReplicas.persistence.mountPath }}
+            - name: PGDATA
+              value: {{ .Values.postgresqlDataDir | quote }}
+            {{- end }}
+            # Authentication
+            {{- if and (not (empty $customUser)) (ne $customUser "postgres") .Values.auth.enablePostgresUser }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: POSTGRES_POSTGRES_PASSWORD_FILE
+              value: "/opt/bitnami/postgresql/secrets/postgres-password"
+            {{- else }}
+            - name: POSTGRES_POSTGRES_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: postgres-password
+            {{- end }}
+            {{- end }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: POSTGRES_PASSWORD_FILE
+              value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }}
+            {{- else }}
+            - name: POSTGRES_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: {{ ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres")) }}
+            {{- end }}
+            # Replication
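+            # Read replicas always run the Bitnami image in "slave" replication mode, streaming from the primary service resolved below.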
+            - name: POSTGRES_REPLICATION_MODE
+              value: "slave"
+            - name: POSTGRES_REPLICATION_USER
+              value: {{ .Values.auth.replicationUsername | quote }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: POSTGRES_REPLICATION_PASSWORD_FILE
+              value: "/opt/bitnami/postgresql/secrets/replication-password"
+            {{- else }}
+            - name: POSTGRES_REPLICATION_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "postgresql.secretName" . }}
+                  key: replication-password
+            {{- end }}
+            - name: POSTGRES_CLUSTER_APP_NAME
+              value: {{ .Values.replication.applicationName }}
+            - name: POSTGRES_MASTER_HOST
+              value: {{ include "postgresql.primary.fullname" . }}
+            - name: POSTGRES_MASTER_PORT_NUMBER
+              value: {{ include "postgresql.service.port" . | quote }}
+            # TLS
+            - name: POSTGRESQL_ENABLE_TLS
+              value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+            {{- if .Values.tls.enabled }}
+            - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS
+              value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }}
+            - name: POSTGRESQL_TLS_CERT_FILE
+              value: {{ include "postgresql.tlsCert" . }}
+            - name: POSTGRESQL_TLS_KEY_FILE
+              value: {{ include "postgresql.tlsCertKey" . }}
+            {{- if .Values.tls.certCAFilename }}
+            - name: POSTGRESQL_TLS_CA_FILE
+              value: {{ include "postgresql.tlsCACert" . }}
+            {{- end }}
+            {{- if .Values.tls.crlFilename }}
+            - name: POSTGRESQL_TLS_CRL_FILE
+              value: {{ include "postgresql.tlsCRL" . }}
+            {{- end }}
+            {{- end }}
+            # Audit
+            - name: POSTGRESQL_LOG_HOSTNAME
+              value: {{ .Values.audit.logHostname | quote }}
+            - name: POSTGRESQL_LOG_CONNECTIONS
+              value: {{ .Values.audit.logConnections | quote }}
+            - name: POSTGRESQL_LOG_DISCONNECTIONS
+              value: {{ .Values.audit.logDisconnections | quote }}
+            {{- if .Values.audit.logLinePrefix }}
+            - name: POSTGRESQL_LOG_LINE_PREFIX
+              value: {{ .Values.audit.logLinePrefix | quote }}
+            {{- end }}
+            {{- if .Values.audit.logTimezone }}
+            - name: POSTGRESQL_LOG_TIMEZONE
+              value: {{ .Values.audit.logTimezone | quote }}
+            {{- end }}
+            {{- if .Values.audit.pgAuditLog }}
+            - name: POSTGRESQL_PGAUDIT_LOG
+              value: {{ .Values.audit.pgAuditLog | quote }}
+            {{- end }}
+            - name: POSTGRESQL_PGAUDIT_LOG_CATALOG
+              value: {{ .Values.audit.pgAuditLogCatalog | quote }}
+            # Others
+            - name: POSTGRESQL_CLIENT_MIN_MESSAGES
+              value: {{ .Values.audit.clientMinMessages | quote }}
+            - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
+              value: {{ .Values.postgresqlSharedPreloadLibraries | quote }}
+            {{- if .Values.readReplicas.extraEnvVars }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraEnvVars "context" $) | nindent 12 }}
+            {{- end }}
+          {{- if or .Values.readReplicas.extraEnvVarsCM .Values.readReplicas.extraEnvVarsSecret }}
+          envFrom:
+            {{- if .Values.readReplicas.extraEnvVarsCM }}
+            - configMapRef:
+                name: {{ .Values.readReplicas.extraEnvVarsCM }}
+            {{- end }}
+            {{- if .Values.readReplicas.extraEnvVarsSecret }}
+            - secretRef:
+                name: {{ .Values.readReplicas.extraEnvVarsSecret }}
+            {{- end }}
+          {{- end }}
+          ports:
+            - name: tcp-postgresql
+              containerPort: {{ .Values.containerPorts.postgresql }}
+          {{- if not .Values.diagnosticMode.enabled }}
+          {{- if .Values.readReplicas.startupProbe.enabled }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.startupProbe "enabled") "context" $) | nindent 12 }}
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                {{- if (include "postgresql.database" .) }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- else }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- end }}
+          {{- else if .Values.readReplicas.customStartupProbe }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customStartupProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.readReplicas.livenessProbe.enabled }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.livenessProbe "enabled") "context" $) | nindent 12 }}
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                {{- if (include "postgresql.database" .) }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- else }}
+                - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }}
+                {{- end }}
+          {{- else if .Values.readReplicas.customLivenessProbe }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customLivenessProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.readReplicas.readinessProbe.enabled }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.readinessProbe "enabled") "context" $) | nindent 12 }}
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                - -e
+                {{- include "postgresql.readinessProbeCommand" . | nindent 16 }}
+          {{- else if .Values.readReplicas.customReadinessProbe }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customReadinessProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- end }}
+          {{- if .Values.readReplicas.resources }}
+          resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }}
+          {{- end }}
+          {{- if .Values.readReplicas.lifecycleHooks }}
+          lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.lifecycleHooks "context" $) | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: postgresql-password
+              mountPath: /opt/bitnami/postgresql/secrets/
+            {{- end }}
+            {{- if .Values.tls.enabled }}
+            - name: postgresql-certificates
+              mountPath: /opt/bitnami/postgresql/certs
+              readOnly: true
+            {{- end }}
+            {{- if .Values.shmVolume.enabled }}
+            - name: dshm
+              mountPath: /dev/shm
+            {{- end }}
+            {{- if .Values.readReplicas.persistence.enabled }}
+            - name: data
+              mountPath: {{ .Values.readReplicas.persistence.mountPath }}
+              {{- if .Values.readReplicas.persistence.subPath }}
+              subPath: {{ .Values.readReplicas.persistence.subPath }}
+              {{- end }}
+            {{- end }}
+            {{- if .Values.readReplicas.extraVolumeMounts }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraVolumeMounts "context" $) | nindent 12 }}
+            {{- end }}
+        {{- if .Values.readReplicas.sidecars }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.sidecars "context" $ ) | nindent 8 }}
+        {{- end }}
+      volumes:
+        {{- if .Values.auth.usePasswordFiles }}
+        - name: postgresql-password
+          secret:
+            secretName: {{ include "postgresql.secretName" . }}
+        {{- end }}
+        {{- if .Values.tls.enabled }}
+        - name: raw-certificates
+          secret:
+            secretName: {{ include "postgresql.tlsSecretName" . }}
+        - name: postgresql-certificates
+          emptyDir: {}
+        {{- end }}
+        {{- if .Values.shmVolume.enabled }}
+        - name: dshm
+          emptyDir:
+            medium: Memory
+            {{- if .Values.shmVolume.sizeLimit }}
+            sizeLimit: {{ .Values.shmVolume.sizeLimit }}
+            {{- end }}
+        {{- end }}
+        {{- if .Values.readReplicas.extraVolumes }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.extraVolumes "context" $ ) | nindent 8 }}
+        {{- end }}
+        {{- if not .Values.readReplicas.persistence.enabled }}
+        - name: data
+          emptyDir: {}
+        {{- else }}
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+        {{- if .Values.readReplicas.persistence.annotations }}
+        annotations: {{- toYaml .Values.readReplicas.persistence.annotations | nindent 10 }}
+        {{- end }}
+      spec:
+        accessModes:
+          {{- range .Values.readReplicas.persistence.accessModes }}
+          - {{ . | quote }}
+          {{- end }}
+        {{- if .Values.readReplicas.persistence.dataSource }}
+        dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.dataSource "context" $) | nindent 10 }}
+        {{- end }}
+        resources:
+          requests:
+            storage: {{ .Values.readReplicas.persistence.size | quote }}
+        {{- if .Values.readReplicas.persistence.selector }}
+        selector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.selector "context" $) | nindent 10 }}
+        {{- end -}}
+        {{- include "common.storage.class" (dict "persistence" .Values.readReplicas.persistence "global" .Values.global) | nindent 8 }}
+  {{- end }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/postgresql/templates/read/svc-headless.yaml b/ansible/roles/helm_install/files/postgresql/templates/read/svc-headless.yaml
new file mode 100644
index 0000000..7600ba6
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/templates/read/svc-headless.yaml
@@ -0,0 +1,33 @@
+{{- if eq .Values.architecture "replication" }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "postgresql.readReplica.svc.headless" . }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    app.kubernetes.io/component: read
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    # Use this annotation in addition to the actual publishNotReadyAddresses
+    # field below because the annotation will stop being respected soon but the
+    # field is broken in some versions of Kubernetes:
+    # https://github.com/kubernetes/kubernetes/issues/58662
+    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+  type: ClusterIP
+  clusterIP: None
+  # We want all pods in the StatefulSet to have their addresses published for
+  # the sake of the other Postgresql pods even before they're ready, since they
+  # have to be able to talk to each other in order to become ready.
+  publishNotReadyAddresses: true
+  ports:
+    - name: tcp-postgresql
+      port: {{ include "postgresql.readReplica.service.port" . }}
+      targetPort: tcp-postgresql
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: read
+{{- end }}
diff --git a/ansible/roles/helm_install/files/postgresql/templates/read/svc.yaml b/ansible/roles/helm_install/files/postgresql/templates/read/svc.yaml
new file mode 100644
index 0000000..dd13cfb
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/templates/read/svc.yaml
@@ -0,0 +1,45 @@
+{{- if eq .Values.architecture "replication" }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "postgresql.readReplica.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    app.kubernetes.io/component: read
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.readReplicas.service.annotations }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.annotations "context" $) | nindent 4 }}
+    {{- end }}
+spec:
+  type: {{ .Values.readReplicas.service.type }}
+  {{- if and .Values.readReplicas.service.loadBalancerIP (eq .Values.readReplicas.service.type "LoadBalancer") }}
+  loadBalancerIP: {{ .Values.readReplicas.service.loadBalancerIP }}
+  externalTrafficPolicy: {{ .Values.readReplicas.service.externalTrafficPolicy | quote }}
+  {{- end }}
+  {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") .Values.readReplicas.service.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.loadBalancerSourceRanges "context" $) | nindent 4 }}
+  {{- end }}
+  {{- if and (eq .Values.readReplicas.service.type "ClusterIP") .Values.readReplicas.service.clusterIP }}
+  clusterIP: {{ .Values.readReplicas.service.clusterIP }}
+  {{- end }}
+  ports:
+    - name: tcp-postgresql
+      port: {{ include "postgresql.readReplica.service.port" . }}
+      targetPort: tcp-postgresql
+      {{- if and (or (eq .Values.readReplicas.service.type "NodePort") (eq .Values.readReplicas.service.type "LoadBalancer")) (not (empty .Values.readReplicas.service.nodePorts.postgresql)) }}
+      nodePort: {{ .Values.readReplicas.service.nodePorts.postgresql }}
+      {{- else if eq .Values.readReplicas.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- if .Values.readReplicas.service.extraPorts }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.extraPorts "context" $) | nindent 4 }}
+    {{- end }}
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: read
+{{- end }}
diff --git a/ansible/roles/helm_install/files/postgresql/templates/role.yaml b/ansible/roles/helm_install/files/postgresql/templates/role.yaml
new file mode 100644
index 0000000..8b04879
--- /dev/null
+++ b/ansible/roles/helm_install/files/postgresql/templates/role.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.rbac.create }}
+kind: Role
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+# yamllint disable rule:indentation
+rules:
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: read + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.readReplicas.service.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.readReplicas.service.type }} + {{- if and .Values.readReplicas.service.loadBalancerIP (eq .Values.readReplicas.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.readReplicas.service.loadBalancerIP }} + externalTrafficPolicy: {{ .Values.readReplicas.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") .Values.readReplicas.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.loadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq .Values.readReplicas.service.type "ClusterIP") .Values.readReplicas.service.clusterIP }} + clusterIP: {{ .Values.readReplicas.service.clusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ include "postgresql.readReplica.service.port" . }} + targetPort: tcp-postgresql + {{- if and (or (eq .Values.readReplicas.service.type "NodePort") (eq .Values.readReplicas.service.type "LoadBalancer")) (not (empty .Values.readReplicas.service.nodePorts.postgresql)) }} + nodePort: {{ .Values.readReplicas.service.nodePorts.postgresql }} + {{- else if eq .Values.readReplicas.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.readReplicas.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: read +{{- end }} diff --git a/ansible/roles/helm_install/files/postgresql/templates/role.yaml b/ansible/roles/helm_install/files/postgresql/templates/role.yaml new file mode 100644 index 0000000..8b04879 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/role.yaml @@ -0,0 +1,31 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +# yamllint disable rule:indentation +rules: + {{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} + {{- if and $pspAvailable .Values.psp.create }} + - apiGroups: + - 'policy' + resources: + - 'podsecuritypolicies' + verbs: + - 'use' + resourceNames: + - {{ include "common.names.fullname" .
}} + {{- end }} + {{- if .Values.rbac.rules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} + {{- end }} +# yamllint enable rule:indentation +{{- end }} diff --git a/ansible/roles/helm_install/files/postgresql/templates/rolebinding.yaml b/ansible/roles/helm_install/files/postgresql/templates/rolebinding.yaml new file mode 100644 index 0000000..88f10af --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/rolebinding.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ include "common.names.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ include "postgresql.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/ansible/roles/helm_install/files/postgresql/templates/secrets.yaml b/ansible/roles/helm_install/files/postgresql/templates/secrets.yaml new file mode 100644 index 0000000..a46fdbf --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/secrets.yaml @@ -0,0 +1,29 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if .Values.auth.enablePostgresUser }} + postgres-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "postgres-password" "providedValues" (list "global.postgresql.auth.postgresPassword" "auth.postgresPassword") "context" $) }} + {{- end }} + {{- if not (empty (include "postgresql.username" .)) }} + password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "password" "providedValues" (list "global.postgresql.auth.password" "auth.password") "context" $) }} + {{- end }} + {{- if eq .Values.architecture "replication" }} + replication-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) 
"key" "replication-password" "providedValues" (list "auth.replicationPassword") "context" $) }} + {{- end }} + # We don't auto-generate LDAP password when it's not provided as we do for other passwords + {{- if and .Values.ldap.enabled .Values.ldap.bind_password }} + ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/postgresql/templates/serviceaccount.yaml b/ansible/roles/helm_install/files/postgresql/templates/serviceaccount.yaml new file mode 100644 index 0000000..2de14af --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "postgresql.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/ansible/roles/helm_install/files/postgresql/templates/tls-secrets.yaml b/ansible/roles/helm_install/files/postgresql/templates/tls-secrets.yaml new file mode 100644 index 0000000..d660f97 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/templates/tls-secrets.yaml @@ -0,0 +1,27 @@ +{{- if (include "postgresql.createTlsSecret" . ) }} +{{- $ca := genCA "postgresql-ca" 365 }} +{{- $fullname := include "common.names.fullname" . }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $primaryHeadlessServiceName := include "postgresql.primary.svc.headless" . }} +{{- $readHeadlessServiceName := include "postgresql.readReplica.svc.headless" . }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-crt" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} diff --git a/ansible/roles/helm_install/files/postgresql/values.schema.json b/ansible/roles/helm_install/files/postgresql/values.schema.json new file mode 100644 index 0000000..fc41483 --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/values.schema.json @@ -0,0 +1,156 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "architecture": { + "type": "string", + "title": "PostgreSQL architecture", + "form": true, + "description": "Allowed values: `standalone` or `replication`" + }, + "auth": { + "type": "object", + "title": "Authentication configuration", + "form": true, + "properties": { + "enablePostgresUser": { + "type": "boolean", + "title": "Enable \"postgres\" admin user", + "description": "Assign a password to the \"postgres\" admin user. Otherwise, remote access will be blocked for this user", + "form": true + }, + "postgresPassword": { + "type": "string", + "title": "Password for the \"postgres\" admin user", + "description": "Defaults to a random 10-character alphanumeric string if not set", + "form": true + }, + "database": { + "type": "string", + "title": "PostgreSQL custom database", + "description": "Name of the custom database to be created during the 1st initialization of PostgreSQL", + "form": true + }, + "username": { + "type": "string", + "title": "PostgreSQL custom user", + "description": "Name of the custom user to be created during the 1st initialization of PostgreSQL. 
This user only has permissions on the PostgreSQL custom database", + "form": true + }, + "password": { + "type": "string", + "title": "Password for the custom user to create", + "description": "Defaults to a random 10-character alphanumeric string if not set", + "form": true + }, + "replicationUsername": { + "type": "string", + "title": "PostgreSQL replication user", + "description": "Name of user used to manage replication.", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + } + }, + "replicationPassword": { + "type": "string", + "title": "Password for PostgreSQL replication user", + "description": "Defaults to a random 10-character alphanumeric string if not set", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + } + } + } + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "readReplicas": { + "type": "integer", + "title": "Read Replicas", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/ansible/roles/helm_install/files/postgresql/values.yaml b/ansible/roles/helm_install/files/postgresql/values.yaml new file mode 100644 index 0000000..f5dfd0d --- /dev/null +++ b/ansible/roles/helm_install/files/postgresql/values.yaml @@ -0,0 +1,1335 @@ +## @section Global parameters +## Please note that this will override the parameters, including dependencies, configured to use the global value +## +global: + ## @param global.imageRegistry Global Docker image registry + ## + imageRegistry: ""
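+  ## A hedged sketch (the registry and storage class names below are
+  ## illustrative assumptions, not chart defaults): a custom values file that
+  ## points every image at a private mirror and pins a storage class:
+  ## e.g:
+  ##   global:
+  ##     imageRegistry: registry.example.com
+  ##     storageClass: "standard"
+  ##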
+ ## @param global.imagePullSecrets Global Docker registry secret names as an array + ## e.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + ## @param global.storageClass Global StorageClass for Persistent Volume(s) + ## + storageClass: "" + postgresql: + ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`) + ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`) + ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`) + ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`) + ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`) + ## + auth: + postgresPassword: "" + username: "" + password: "" + database: "" + existingSecret: "" + ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) + ## + service: + ports: + postgresql: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template) +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources +## +commonLabels: {} +## @param commonAnnotations Add annotations to all the deployed resources +## +commonAnnotations: {} +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + +## @section PostgreSQL common parameters +## + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## @param image.registry PostgreSQL image registry +## @param image.repository PostgreSQL image repository +## @param image.tag PostgreSQL image tag (immutable tags are recommended) +## @param image.pullPolicy PostgreSQL image pull policy +## @param image.pullSecrets Specify image pull secrets +## @param image.debug Specify if debug values should be set +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 14.2.0-debian-10-r44 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## Authentication parameters +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run +## +auth: + ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user + ## + enablePostgresUser: true + ## @param auth.postgresPassword Password for the "postgres" admin user + ## + postgresPassword: "root" + ## @param auth.username Name for a custom user to create + ## + username: "root" + ## @param auth.password Password for the custom user to create + ## + password: "root" + ## @param auth.database Name for a custom database to create + ## + database: "" + ## @param auth.replicationUsername Name of the replication user + ## + replicationUsername: repl_user + ## @param auth.replicationPassword Password for the replication user + ## + replicationPassword: "" + ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials + ## `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret + ## The secret must contain the keys `postgres-password` (which is the password for "postgres" admin user), + ## `password` (which is the password for the custom user to create when `auth.username` is set), + ## and `replication-password` (which is the password for replication user). + ## The secret might also contain the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and + ## picked from this secret in this case. + ## The value is evaluated as a template. + ## + existingSecret: "" + ## @param auth.usePasswordFiles Mount credentials as files instead of using environment variables + ## + usePasswordFiles: false +## @param architecture PostgreSQL architecture (`standalone` or `replication`) +## +architecture: standalone +## Replication configuration +## Ignored if `architecture` is `standalone` +## +replication: + ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` + ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. + ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT + ## + synchronousCommit: "off" + numSynchronousReplicas: 0 + ## @param replication.applicationName Cluster application name.
Useful for advanced replication settings + ## + applicationName: my_application +## @param containerPorts.postgresql PostgreSQL container port +## +containerPorts: + postgresql: 5432 +## Audit settings +## https://github.com/bitnami/bitnami-docker-postgresql#auditing +## @param audit.logHostname Log client hostnames +## @param audit.logConnections Add client log-in operations to the log file +## @param audit.logDisconnections Add client log-out operations to the log file +## @param audit.pgAuditLog Add operations to log using the pgAudit extension +## @param audit.pgAuditLogCatalog Log catalog using pgAudit +## @param audit.clientMinMessages Message log level to share with the user +## @param audit.logLinePrefix Template for log line prefix (default if not set) +## @param audit.logTimezone Timezone for the log timestamps +## +audit: + logHostname: false + logConnections: false + logDisconnections: false + pgAuditLog: "" + pgAuditLogCatalog: "off" + clientMinMessages: error + logLinePrefix: "" + logTimezone: "" +## LDAP configuration +## @param ldap.enabled Enable LDAP support +## @param ldap.url LDAP URL beginning in the form `ldap[s]://host[:port]/basedn` +## @param ldap.server IP address or name of the LDAP server. +## @param ldap.port Port number on the LDAP server to connect to +## @param ldap.prefix String to prepend to the user name when forming the DN to bind +## @param ldap.suffix String to append to the user name when forming the DN to bind +## @param ldap.baseDN Root DN to begin the search for the user in +## @param ldap.bindDN DN of user to bind to LDAP +## @param ldap.bind_password Password for the user to bind to LDAP +## @param ldap.search_attr Attribute to match against the user name in the search +## @param ldap.search_filter The search filter to use when doing search+bind authentication +## @param ldap.scheme Set to `ldaps` to use LDAPS +## @param ldap.tls Set to `1` to use TLS encryption +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: "" + search_attr: "" + search_filter: "" + scheme: "" + tls: "" +## @param postgresqlDataDir PostgreSQL data dir folder +## +postgresqlDataDir: /bitnami/postgresql/data +## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list) +## +postgresqlSharedPreloadLibraries: "pgaudit"
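+## A hedged sketch (values are illustrative, not defaults): a minimal override
+## file that switches on the replication architecture, makes one replica
+## synchronous, and runs two read replicas:
+## e.g:
+##   architecture: replication
+##   replication:
+##     synchronousCommit: "on"
+##     numSynchronousReplicas: 1
+##   readReplicas:
+##     replicaCount: 2
+##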
+## Start PostgreSQL pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M` +## ref: https://github.com/docker-library/postgres/issues/416 +## ref: https://github.com/containerd/containerd/issues/3654 +## +shmVolume: + ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) + ## + enabled: true + ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs + ## Note: the size of the tmpfs counts against container's memory limit + ## e.g: + ## sizeLimit: 1Gi + ## + sizeLimit: "" +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic support + ## + enabled: false + ## @param tls.autoGenerated Generate automatically self-signed TLS certificates + ## + autoGenerated: false + ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's + ## + preferServerCiphers: true + ## @param tls.certificatesSecret Name of an existing secret that contains the certificates + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html + ## + certCAFilename: "" + ## @param tls.crlFilename File containing a Certificate Revocation List + ## + crlFilename: "" + +## @section PostgreSQL Primary parameters +## +primary: + ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap + ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html + ## + configuration: "" + ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration + ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html + ## e.g: + ## pgHbaConfiguration: |- + ## local all all trust + ## host all all localhost trust + ## host mydatabase mysuser 192.168.0.0/24 md5 + ## + pgHbaConfiguration: "" + ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration + ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored + ## + existingConfigmap: "" + ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + extendedConfiguration: "" + ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration + ## NOTE: `primary.extendedConfiguration` will be ignored + ## + existingExtendedConfigmap: "" + ## Initdb configuration + ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#specifying-initdb-arguments + ## + initdb: + ## @param primary.initdb.args PostgreSQL initdb extra arguments + ## + args: "" + ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log + ## + postgresqlWalDir: "" + ## @param primary.initdb.scripts Dictionary of initdb scripts + ## Specify dictionary of scripts to be run at first boot + ## e.g: + ## scripts: + ## my_init_script.sh: | + ## #!/bin/sh + ## echo "Do something."
+ ## + scripts: {} + ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot + ## NOTE: This will override `primary.initdb.scripts` + ## + scriptsConfigMap: "" + ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information) + ## NOTE: This can work alongside `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap` + ## + scriptsSecret: "" + ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts + ## + user: "" + ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts + ## + password: "" + ## Configure current cluster's primary server to be the standby server in another cluster. + ## This will allow cross cluster replication and provide cross cluster high availability. + ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. + ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not + ## @param primary.standby.primaryHost The Host of replication primary in the other cluster + ## @param primary.standby.primaryPort The Port of replication primary in the other cluster + ## + standby: + enabled: false + primaryHost: "" + primaryPort: "" + ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsSecret: "" + ## @param primary.command Override default container command (useful when using custom images) + ## + command: [] + ## @param primary.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true +
initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param primary.lifecycleHooks Lifecycle hooks for the PostgreSQL Primary container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL Primary resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param primary.resources.limits The resources limits for the PostgreSQL Primary containers + ## @param primary.resources.requests.memory The requested memory for the PostgreSQL Primary containers + ## @param primary.resources.requests.cpu The requested cpu for the PostgreSQL Primary containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.podSecurityContext.enabled Enable security context + ## @param primary.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.containerSecurityContext.enabled Enable container security context + ## @param primary.containerSecurityContext.runAsUser User ID for the container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param primary.hostAliases PostgreSQL primary pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary) + ## + hostNetwork: false + ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) + ## + hostIPC: false + ## @param primary.labels Map of labels to add to the statefulset (postgresql primary) + ## + labels: {} + ## @param primary.annotations Annotations for PostgreSQL primary pods + ## + annotations: {} + ## @param primary.podLabels Map of labels to add to the pods (postgresql primary) + ## + podLabels: {} + ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary) + ## + podAnnotations: {}
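+  ## A hedged sketch (sizes are illustrative): adding explicit limits on top of
+  ## the default requests via an override file:
+  ## e.g:
+  ##   primary:
+  ##     resources:
+  ##       limits:
+  ##         memory: 1Gi
+  ##         cpu: "1"
+  ##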
+ ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match. Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param primary.affinity Affinity for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: {} + ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary) + ## + priorityClassName: ""
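+  ## A hedged sketch (the node label and taint below are hypothetical): pinning
+  ## the primary to dedicated database nodes and tolerating their taint:
+  ## e.g:
+  ##   primary:
+  ##     nodeSelector:
+  ##       workload: database
+  ##     tolerations:
+  ##       - key: "dedicated"
+  ##         operator: "Equal"
+  ##         value: "postgresql"
+  ##         effect: "NoSchedule"
+  ##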
+ ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type + ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) + ## + extraPodSpec: {} + ## PostgreSQL Primary service configuration + ## + service: + ## @param primary.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param primary.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param primary.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.annotations Annotations for PostgreSQL primary service + ## + annotations: {} + ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service + ## + extraPorts: [] + ## PostgreSQL Primary persistence configuration + ## + persistence: + ## @param primary.persistence.enabled Enable
PostgreSQL Primary data persistence using PVC + ## + enabled: true + ## @param primary.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "" + ## @param primary.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param primary.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param primary.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.persistence.dataSource Custom PVC data source + ## + dataSource: {} + +## @section PostgreSQL read only replica parameters +## +readReplicas: + ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas + ## + replicaCount: 1 + ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsCM: "" + ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsSecret: "" + ## @param readReplicas.command Override default container command (useful when using custom images) + ## + command: [] + ## @param readReplicas.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers + ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers + ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds 
for readinessProbe + ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers + ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe + ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param readReplicas.lifecycleHooks Lifecycle hooks for the PostgreSQL read only container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL read only resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param readReplicas.resources.limits The resources limits for the PostgreSQL read only containers + ## @param readReplicas.resources.requests.memory The requested memory for the PostgreSQL read only containers + ## @param readReplicas.resources.requests.cpu The requested cpu for the PostgreSQL read only containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.podSecurityContext.enabled Enable security context + ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.containerSecurityContext.enabled Enable container security context + ## @param readReplicas.containerSecurityContext.runAsUser User ID for the container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) + ## + hostNetwork: false + ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only) + ## + hostIPC: false + ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only) + ## +
labels: {} + ## @param readReplicas.annotations Annotations for PostgreSQL read only pods + ## + annotations: {} + ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only) + ## + podLabels: {} + ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only) + ## + podAnnotations: {} + ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL read only node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match. Ignored if `readReplicas.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `readReplicas.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: readReplicas.podAffinityPreset, readReplicas.podAntiAffinityPreset, and readReplicas.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: {} + ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only) + ## + priorityClassName: ""
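+  ## A hedged sketch (zone values are hypothetical): spreading read replicas
+  ## across zones with the preset knobs documented above:
+  ## e.g:
+  ##   readReplicas:
+  ##     podAntiAffinityPreset: hard
+  ##     nodeAffinityPreset:
+  ##       type: soft
+  ##       key: topology.kubernetes.io/zone
+  ##       values:
+  ##         - zone-a
+  ##         - zone-b
+  ##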
+ ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type + ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) + ## + extraVolumeMounts: [] + ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) + ## + extraVolumes: [] + ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s) + ## + extraPodSpec: {} + ## PostgreSQL read only service configuration + ## + service: + ## @param readReplicas.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param readReplicas.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service + ## + annotations: {} + ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service + ## + extraPorts: [] + ##
PostgreSQL read only persistence configuration + ## + persistence: + ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC + ## + enabled: true + ## @param readReplicas.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param readReplicas.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param readReplicas.persistence.dataSource Custom PVC data source + ## + dataSource: {} + +## @section NetworkPolicy parameters + +## Add networkpolicies +## +networkPolicy: + ## @param networkPolicy.enabled Enable network policies + ## + enabled: false + ## @param networkPolicy.metrics.enabled Enable network policies for metrics (prometheus) + ## @param networkPolicy.metrics.namespaceSelector [object] Monitoring namespace selector labels. These labels will be used to identify the Prometheus namespace. + ## @param networkPolicy.metrics.podSelector [object] Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. + ## + metrics: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: monitoring + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: monitoring + ## + podSelector: {} + ## Ingress Rules + ## + ingressRules: + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed namespace(s). + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed pod(s). + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL primary node.
+    ##
+    primaryAccessOnlyFrom:
+      enabled: false
+      ## e.g:
+      ## namespaceSelector:
+      ##   label: ingress
+      ##
+      namespaceSelector: {}
+      ## e.g:
+      ## podSelector:
+      ##   label: access
+      ##
+      podSelector: {}
+      ## custom ingress rules
+      ## e.g:
+      ## customRules:
+      ##   - from:
+      ##       - namespaceSelector:
+      ##           matchLabels:
+      ##             label: example
+      customRules: {}
+    ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin.
+    ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed namespace(s).
+    ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed pod(s).
+    ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL read-only nodes.
+    ##
+    readReplicasAccessOnlyFrom:
+      enabled: false
+      ## e.g:
+      ## namespaceSelector:
+      ##   label: ingress
+      ##
+      namespaceSelector: {}
+      ## e.g:
+      ## podSelector:
+      ##   label: access
+      ##
+      podSelector: {}
+      ## custom ingress rules
+      ## e.g:
+      ## customRules:
+      ##   - from:
+      ##       - namespaceSelector:
+      ##           matchLabels:
+      ##             label: example
+      customRules: {}
+  ## @param networkPolicy.egressRules.denyConnectionsToExternal Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53).
+  ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule
+  ##
+  egressRules:
+    # Deny connections to external. This is not compatible with an external database.
+    denyConnectionsToExternal: false
+    ## Additional custom egress rules
+    ## e.g:
+    ## customRules:
+    ##   - to:
+    ##       - namespaceSelector:
+    ##           matchLabels:
+    ##             label: example
+    customRules: {}
+
+## @section Volume Permissions parameters
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
+##
+volumePermissions:
+  ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
+  ##
+  enabled: false
+  ## @param volumePermissions.image.registry Init container volume-permissions image registry
+  ## @param volumePermissions.image.repository Init container volume-permissions image repository
+  ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
+  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+  ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/bitnami-shell
+    tag: 10-debian-10-r377
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## Example:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Init container resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
+  ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Init container's Security Context
+  ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+  ## and not the below volumePermissions.containerSecurityContext.runAsUser
+  ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+  ##
+  containerSecurityContext:
+    runAsUser: 0
+
+## @section Other Parameters
+
+## Service account for PostgreSQL to use.
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+  ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod
+  ##
+  create: false
+  ## @param serviceAccount.name The name of the ServiceAccount to use.
+  ## If not set and create is true, a name is generated using the common.names.fullname template
+  ##
+  name: ""
+  ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
+  ## Can be set to false if pods using this serviceAccount do not need to use K8s API
+  ##
+  automountServiceAccountToken: true
+  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+  ##
+  annotations: {}
+## Creates role for ServiceAccount
+## @param rbac.create Create Role and RoleBinding (required for PSP to work)
+##
+rbac:
+  create: false
+  ## @param rbac.rules Custom RBAC rules to set
+  ## e.g:
+  ## rules:
+  ##   - apiGroups:
+  ##       - ""
+  ##     resources:
+  ##       - pods
+  ##     verbs:
+  ##       - get
+  ##       - list
+  ##
+  rules: []
+## Pod Security Policy
+## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later
+##
+psp:
+  create: false
+
+## @section Metrics Parameters
+
+metrics:
+  ## @param metrics.enabled Start a prometheus exporter
+  ##
+  enabled: false
+  ## @param metrics.image.registry PostgreSQL Prometheus Exporter image registry
+  ## @param metrics.image.repository PostgreSQL Prometheus Exporter image repository
+  ## @param metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended)
+  ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy
+  ## @param metrics.image.pullSecrets Specify image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/postgres-exporter
+    tag: 0.10.1-debian-10-r65
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.customMetrics Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + ## customMetrics: + ## pg_database: + ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + ## metrics: + ## - name: + ## usage: "LABEL" + ## description: "Name of the database" + ## - size_bytes: + ## usage: "GAUGE" + ## description: "Size of the database in bytes" + ## + customMetrics: {} + ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + ## extraEnvVars: + ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + ## value: "true" + ## + extraEnvVars: [] + ## PostgreSQL Prometheus exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.containerSecurityContext.enabled Enable PostgreSQL Prometheus exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set PostgreSQL Prometheus exporter containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsNonRoot Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter 
containers + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port + ## + containerPorts: + metrics: 9187 + ## PostgreSQL Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.resources.limits The resources limits for the PostgreSQL Prometheus exporter container + ## @param metrics.resources.requests The requested resources for the PostgreSQL Prometheus exporter container + ## + resources: + limits: {} + requests: {} + ## Service configuration + ## + service: + ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port + ## + ports: + metrics: 9187 + ## @param metrics.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. 
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    interval: ""
+    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    scrapeTimeout: ""
+    ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
+    ##
+    labels: {}
+    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    selector: {}
+    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
+    ##
+    relabelings: []
+    ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
+    ##
+    metricRelabelings: []
+    ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
+    ##
+    honorLabels: false
+    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
+    ##
+    jobLabel: ""
+  ## Custom PrometheusRule to be defined
+  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+  ##
+  prometheusRule:
+    ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator
+    ##
+    enabled: false
+    ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+    ##
+    namespace: ""
+    ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus
+    ##
+    labels: {}
+    ## @param metrics.prometheusRule.rules PrometheusRule definitions
+    ## Make sure to constrain the rules to the current postgresql service.
+    ## rules:
+    ##   - alert: HugeReplicationLag
+    ##     expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "common.names.fullname" .) }}"} / 3600 > 1
+    ##     for: 1m
+    ##     labels:
+    ##       severity: critical
+    ##     annotations:
+    ##       description: replication for {{ include "common.names.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
+    ##       summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
+    ##
+    rules: []
diff --git a/ansible/roles/helm_install/files/rabbitmq/.helmignore b/ansible/roles/helm_install/files/rabbitmq/.helmignore
new file mode 100644
index 0000000..f0c1319
--- /dev/null
+++ b/ansible/roles/helm_install/files/rabbitmq/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/roles/helm_install/files/rabbitmq/Chart.lock b/ansible/roles/helm_install/files/rabbitmq/Chart.lock new file mode 100644 index 0000000..d8cafb6 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.17.1 +digest: sha256:dacc73770a5640c011e067ff8840ddf89631fc19016c8d0a9e5ea160e7da8690 +generated: "2022-09-21T13:15:49.012454903+09:00" diff --git a/ansible/roles/helm_install/files/rabbitmq/Chart.yaml b/ansible/roles/helm_install/files/rabbitmq/Chart.yaml new file mode 100644 index 0000000..da574f8 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 3.9.15 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: RabbitMQ is an open source general-purpose message broker that is designed + for consistent, highly-available messaging scenarios (both synchronous and asynchronous). +home: https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq +icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png +keywords: +- rabbitmq +- message queue +- AMQP +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: rabbitmq +sources: +- https://github.com/bitnami/bitnami-docker-rabbitmq +- https://www.rabbitmq.com +version: 8.31.4 diff --git a/ansible/roles/helm_install/files/rabbitmq/README.md b/ansible/roles/helm_install/files/rabbitmq/README.md new file mode 100644 index 0000000..d13d278 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/README.md @@ -0,0 +1,604 @@ + + +# RabbitMQ packaged by Bitnami + +RabbitMQ is an open source general-purpose message broker that is designed for consistent, highly-available messaging scenarios (both synchronous and asynchronous). + +[Overview of RabbitMQ](https://www.rabbitmq.com) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/rabbitmq +``` + +## Introduction + +This chart bootstraps a [RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/rabbitmq +``` + +The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
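+
+For example (the release name and the `replicaCount` override below are illustrative), individual parameters from the [Parameters](#parameters) section can be set on the command line:
+
+```bash
+$ helm install my-release --set replicaCount=3 bitnami/rabbitmq
+```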
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### RabbitMQ Image parameters + +| Name | Description | Value | +| ------------------- | -------------------------------------------------------------- | --------------------- | +| `image.registry` | RabbitMQ image registry | `docker.io` | +| `image.repository` | RabbitMQ image repository | `bitnami/rabbitmq` | +| `image.tag` | RabbitMQ image tag (immutable tags are recommended) | `3.9.14-debian-10-r5` | +| `image.pullPolicy` | RabbitMQ image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | + + +### Common parameters + +| Name | Description | Value | +| ---------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | +| `nameOverride` | String to partially override rabbitmq.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override rabbitmq.fullname template | `""` | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | +| `hostAliases` | Deployment pod host aliases | `[]` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `auth.username` | RabbitMQ application username | `user` | +| `auth.password` | RabbitMQ application password | `""` | +| `auth.existingPasswordSecret` | Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) | `""` | +| `auth.erlangCookie` | Erlang cookie to determine whether different nodes are allowed to communicate with each other | `""` | +| `auth.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) | `""` | +| `auth.tls.enabled` | Enable TLS support on RabbitMQ | `false` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `auth.tls.failIfNoPeerCert` | When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` | +| `auth.tls.sslOptionsVerify` | Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? 
| `verify_peer` | +| `auth.tls.caCertificate` | Certificate Authority (CA) bundle content | `""` | +| `auth.tls.serverCertificate` | Server certificate content | `""` | +| `auth.tls.serverKey` | Server private key content | `""` | +| `auth.tls.existingSecret` | Existing secret with certificate content to RabbitMQ credentials | `""` | +| `auth.tls.existingSecretFullChain` | Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. | `false` | +| `logs` | Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable | `-` | +| `ulimitNofiles` | RabbitMQ Max File Descriptors | `65536` | +| `maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `""` | +| `onlineSchedulers` | RabbitMQ online scheduler threads | `""` | +| `memoryHighWatermark.enabled` | Enable configuring Memory high watermark on RabbitMQ | `false` | +| `memoryHighWatermark.type` | Memory high watermark type. Either `absolute` or `relative` | `relative` | +| `memoryHighWatermark.value` | Memory high watermark value | `0.4` | +| `plugins` | List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) | `rabbitmq_management rabbitmq_peer_discovery_k8s` | +| `communityPlugins` | List of Community plugins (URLs) to be downloaded during container initialization | `""` | +| `extraPlugins` | Extra plugins to enable (single string containing a space-separated list) | `rabbitmq_auth_backend_ldap` | +| `clustering.enabled` | Enable RabbitMQ clustering | `true` | +| `clustering.addressType` | Switch clustering mode. Either `ip` or `hostname` | `hostname` | +| `clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` | +| `clustering.forceBoot` | Force boot of an unexpectedly shut down cluster (in an unexpected order). | `false` | +| `clustering.partitionHandling` | Switch Partition Handling Strategy. Either `autoheal` or `pause-minority` or `pause-if-all-down` or `ignore` | `autoheal` | +| `loadDefinition.enabled` | Enable loading a RabbitMQ definitions file to configure RabbitMQ | `false` | +| `loadDefinition.file` | Name of the definitions file | `/app/load_definition.json` | +| `loadDefinition.existingSecret` | Existing secret with the load definitions file | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `terminationGracePeriodSeconds` | Default duration in seconds k8s waits for container to exit before sending kill signal. 
| `120` |
+| `extraEnvVars` | Extra environment variables to add to RabbitMQ pods | `[]` |
+| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` |
+| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables (in case of sensitive data) | `""` |
+| `extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `[]` |
+| `configuration` | RabbitMQ Configuration file content: required cluster configuration | `""` |
+| `extraConfiguration` | Configuration file content: extra configuration to be appended to RabbitMQ configuration | `""` |
+| `advancedConfiguration` | Configuration file content: advanced configuration | `""` |
+| `ldap.enabled` | Enable LDAP support | `false` |
+| `ldap.servers` | List of LDAP servers hostnames | `[]` |
+| `ldap.port` | LDAP servers port | `389` |
+| `ldap.user_dn_pattern` | Pattern used to translate the provided username into a value to be used for the LDAP bind | `cn=${username},dc=example,dc=org` |
+| `ldap.tls.enabled` | If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter | `false` |
+| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts | `[]` |
+| `extraVolumes` | Optionally specify extra list of additional volumes | `[]` |
+| `extraSecrets` | Optionally specify extra secrets to be created by the chart. | `{}` |
+| `extraSecretsPrependReleaseName` | Set this flag to true if extraSecrets should be created with the release name prepended. | `false` |
+
+
+### Statefulset parameters
+
+| Name | Description | Value |
+| ------------------------------------ | ------------------------------------------------------------------------------------------------------ | --------------- |
+| `replicaCount` | Number of RabbitMQ replicas to deploy | `1` |
+| `schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
+| `podManagementPolicy` | Pod management policy | `OrderedReady` |
+| `podLabels` | RabbitMQ Pod labels. Evaluated as a template | `{}` |
+| `podAnnotations` | RabbitMQ Pod annotations. Evaluated as a template | `{}` |
+| `updateStrategyType` | Update strategy type for RabbitMQ statefulset | `RollingUpdate` |
+| `statefulsetLabels` | RabbitMQ statefulset labels. Evaluated as a template | `{}` |
+| `priorityClassName` | Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand | `""` |
+| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` |
+| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
+| `affinity` | Affinity for pod assignment. Evaluated as a template | `{}` |
+| `nodeSelector` | Node labels for pod assignment. Evaluated as a template | `{}` |
+| `tolerations` | Tolerations for pod assignment. Evaluated as a template | `[]` |
+| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `podSecurityContext.enabled` | Enable RabbitMQ pods' Security Context | `true` |
+| `podSecurityContext.fsGroup` | Group ID for the filesystem used by the containers | `1001` |
+| `podSecurityContext.runAsUser` | User ID for the service user running the pod | `1001` |
+| `containerSecurityContext` | RabbitMQ containers' Security Context | `{}` |
+| `resources.limits` | The resources limits for RabbitMQ containers | `{}` |
+| `resources.requests` | The requested resources for RabbitMQ containers | `{}` |
+| `livenessProbe.enabled` | Enable livenessProbe | `true` |
+| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` |
+| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` |
+| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `20` |
+| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
+| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `readinessProbe.enabled` | Enable readinessProbe | `true` |
+| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` |
+| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` |
+| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `20` |
+| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` |
+| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `customLivenessProbe` | Override default liveness probe | `{}` |
+| `customReadinessProbe` | Override default readiness probe | `{}` |
+| `customStartupProbe` | Define a custom startup probe | `{}` |
+| `initContainers` | Add init containers to the RabbitMQ pod | `[]` |
+| `sidecars` | Add sidecar containers to the RabbitMQ pod | `[]` |
+| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` |
+| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` |
+| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` |
+
+
+### RBAC parameters
+
+| Name | Description | Value |
+| --------------------------------------------- | --------------------------------------------------- | ------ |
+| `serviceAccount.create` | Enable creation of ServiceAccount for RabbitMQ pods | `true` |
+| `serviceAccount.name` | Name of the created serviceAccount | `""` |
+| `serviceAccount.automountServiceAccountToken` | Auto-mount the service account token in the pod | `true` |
+| `rbac.create` | Whether RBAC rules should be created | `true` |
+
+
+### Persistence parameters
+
+| Name | Description | Value |
+| --------------------------- | ------------------------------------------------ | -------------------------- |
+| `persistence.enabled` | Enable RabbitMQ data persistence using PVC | `true` |
+| `persistence.storageClass` | PVC Storage Class for RabbitMQ data volume | `""` |
+| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` |
+| `persistence.accessMode` | PVC Access Mode for RabbitMQ data volume | `ReadWriteOnce` |
+| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `""` |
+| `persistence.mountPath` | The path the volume will be mounted at | `/bitnami/rabbitmq/mnesia` |
+| `persistence.subPath` | The subdirectory of the volume to mount to | `""` |
+| `persistence.size` | PVC Storage Request for RabbitMQ data volume | `8Gi` |
+| `persistence.volumes` | Additional volumes without creating PVC | `[]` |
+| `persistence.annotations` | Persistence annotations. Evaluated as a template | `{}` |
+
+
+### Exposure parameters
+
+| Name | Description | Value |
+| ---------------------------------- | ---------------------------------------------------------------------------------------------- | ------------------------ |
+| `service.type` | Kubernetes Service type | `ClusterIP` |
+| `service.portEnabled` | Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. | `true` |
+| `service.port` | Amqp port | `5672` |
+| `service.portName` | Amqp service port name | `amqp` |
+| `service.tlsPort` | Amqp TLS port | `5671` |
+| `service.tlsPortName` | Amqp TLS service port name | `amqp-ssl` |
+| `service.nodePort` | Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` | `""` |
+| `service.tlsNodePort` | Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` | `""` |
+| `service.distPortEnabled` | Erlang distribution server port | `true` |
+| `service.distPort` | Erlang distribution server port | `25672` |
+| `service.distPortName` | Erlang distribution service port name | `dist` |
+| `service.distNodePort` | Node port override for `dist` port, if serviceType is `NodePort` | `""` |
+| `service.managerPortEnabled` | RabbitMQ Manager port | `true` |
+| `service.managerPort` | RabbitMQ Manager port | `15672` |
+| `service.managerPortName` | RabbitMQ Manager service port name | `http-stats` |
+| `service.managerNodePort` | Node port override for `http-stats` port, if serviceType `NodePort` | `""` |
+| `service.metricsPort` | RabbitMQ Prometheus metrics port | `9419` |
+| `service.metricsPortName` | RabbitMQ Prometheus metrics service port name | `metrics` |
+| `service.metricsNodePort` | Node port override for `metrics` port, if serviceType is `NodePort` | `""` |
+| `service.epmdPortEnabled` | RabbitMQ EPMD Discovery service port | `true` |
+| `service.epmdNodePort` | Node port override for `epmd` port, if serviceType is `NodePort` | `""` |
+| `service.epmdPortName` | EPMD Discovery service port name | `epmd` |
+| `service.extraPorts` | Extra ports to expose in the service | `[]` |
+| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is `LoadBalancer` | `[]` |
+| `service.externalIPs` | Set the ExternalIPs | `[]` |
+| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
+| `service.loadBalancerIP` | Set the LoadBalancerIP | `""` |
+| `service.labels` | Service labels. Evaluated as a template | `{}` |
+| `service.annotations` | Service annotations. Evaluated as a template | `{}` |
+| `service.annotationsHeadless` | Headless Service annotations. Evaluated as a template | `{}` |
+| `ingress.enabled` | Enable ingress resource for Management console | `false` |
+| `ingress.path` | Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` |
+| `ingress.pathType` | Ingress path type | `ImplementationSpecific` |
+| `ingress.hostname` | Default host for the ingress resource | `rabbitmq.local` |
+| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
+| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` |
+| `ingress.selfSigned` | Set this to true in order to create a TLS secret for this ingress record | `false` |
+| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` |
+| `ingress.extraRules` | The list of additional rules to be added to this ingress record. Evaluated as a template | `[]` |
+| `ingress.extraTls` | The TLS configuration for additional hostnames to be covered with this ingress record. | `[]` |
+| `ingress.secrets` | Custom TLS certificates as secrets | `[]` |
+| `ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` |
+| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` |
+| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
+| `networkPolicy.additionalRules` | Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. | `[]` |
+
+
+### Metrics Parameters
+
+| Name | Description | Value |
+| ------------------------------------------ | -------------------------------------------------------------------------------------------------------- | --------------------- |
+| `metrics.enabled` | Enable exposing RabbitMQ metrics to be gathered by Prometheus | `false` |
+| `metrics.plugins` | Plugins to enable Prometheus metrics in RabbitMQ | `rabbitmq_prometheus` |
+| `metrics.podAnnotations` | Annotations for enabling prometheus to access the metrics endpoint | `{}` |
+| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` |
+| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` |
+| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` |
+| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` |
+| `metrics.serviceMonitor.relabellings` | MetricsRelabelConfigs to apply to samples before ingestion. DEPRECATED: Will be removed in next major. | `[]` |
+| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` |
+| `metrics.serviceMonitor.metricRelabelings` | MetricsRelabelConfigs to apply to samples before ingestion. | `[]` |
+| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` |
+| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are required by the installed Prometheus Operator | `{}` |
+| `metrics.serviceMonitor.targetLabels` | Used to keep given service's labels in target | `{}` |
+| `metrics.serviceMonitor.podTargetLabels` | Used to keep given pod's labels in target | `{}` |
+| `metrics.serviceMonitor.path` | Define the path used by ServiceMonitor to scrape metrics | `""` |
+| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` |
+| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` |
+| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `""` |
+| `metrics.prometheusRule.rules` | List of rules, used as template by Helm. | `[]` |
+
+
+### Init Container Parameters
+
+| Name | Description | Value |
+| -------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ----------------------- |
+| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
+| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r378` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
+| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
+| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` |
+| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` |
+
+
+The above parameters map to the env variables defined in [bitnami/rabbitmq](https://github.com/bitnami/bitnami-docker-rabbitmq). For more information please refer to the [bitnami/rabbitmq](https://github.com/bitnami/bitnami-docker-rabbitmq) image documentation.
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install my-release \
+  --set auth.username=admin,auth.password=secretpassword,auth.erlangCookie=secretcookie \
+    bitnami/rabbitmq
+```
+
+The above command sets the RabbitMQ admin username and password to `admin` and `secretpassword` respectively. Additionally, the secure Erlang cookie is set to `secretcookie`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install my-release -f values.yaml bitnami/rabbitmq
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Set pod affinity
+
+This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart.
To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
+
+### Scale horizontally
+
+To horizontally scale this chart once it has been deployed, two options are available:
+
+- Use the `kubectl scale` command.
+- Upgrade the chart, modifying the `replicaCount` parameter.
+
+> NOTE: When upgrading the chart, it is mandatory to specify the password and Erlang cookie that were set the first time the chart was installed.
+
+When scaling down the solution, unnecessary RabbitMQ nodes are automatically stopped, but they are not removed from the cluster. You need to manually remove them by running the `rabbitmqctl forget_cluster_node` command.
+
+Refer to the chart documentation for [more information on scaling the Rabbit cluster horizontally](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/scale-deployment/).
+
+### Enable TLS support
+
+To enable TLS support, first generate the certificates as described in the [RabbitMQ documentation for SSL certificate generation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation).
+
+Once the certificates are generated, you have two alternatives:
+
+* Create a secret with the certificates and associate the secret when deploying the chart
+* Include the certificates in the *values.yaml* file when deploying the chart
+
+Set the *auth.tls.failIfNoPeerCert* parameter to *false* to allow a TLS connection if the client fails to provide a certificate.
+
+Set the *auth.tls.sslOptionsVerify* to *verify_peer* to force a node to perform peer verification. When set to *verify_none*, peer verification will be disabled and certificate exchange won't be performed.
+
+Refer to the chart documentation for [more information and examples of enabling TLS and using Let's Encrypt certificates](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/enable-tls-ingress/).
+
+### Load custom definitions
+
+It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](https://www.rabbitmq.com/management.html#load-definitions).
+
+Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). Within the secret's data, choose a key name that corresponds with the desired load definitions filename (i.e. `load_definition.json`) and use the JSON object as the value.
+
+Next, specify the `load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (i.e. `/app/load_definition.json`) and set `loadDefinition.enabled` to `true`. Any load definitions specified will be available within the container at `/app`.
+
+> NOTE: Loading a definition will take precedence over any configuration done through [Helm values](#parameters).
+
+If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. These secrets can also be templated to use supplied chart values.
+
+Refer to the chart documentation for [more information and configuration examples of loading custom definitions](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/load-files/).
+
+### Configure LDAP support
+
+LDAP support can be enabled in the chart by specifying the `ldap.*` parameters while creating a release.
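+
+As a minimal sketch (the LDAP server hostname below is a placeholder; the parameters are the `ldap.*` values listed in the [Parameters](#parameters) section), the values could look like:
+
+```yaml
+ldap:
+  enabled: true
+  servers:
+    - ldap.example.org
+  port: "389"
+  user_dn_pattern: cn=${username},dc=example,dc=org
+```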
Refer to the chart documentation for [more information and a configuration example](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/configure-ldap/).
+
+### Configure memory high watermark
+
+It is possible to configure a memory high watermark on RabbitMQ to define [memory thresholds](https://www.rabbitmq.com/memory.html#threshold) using the `memoryHighWatermark.*` parameters. To do so, you have two alternatives:
+
+* Set an absolute limit of RAM to be used on each RabbitMQ node, as shown in the configuration example below:
+
+```
+memoryHighWatermark.enabled="true"
+memoryHighWatermark.type="absolute"
+memoryHighWatermark.value="512MB"
+```
+
+* Set a relative limit of RAM to be used on each RabbitMQ node. To enable this feature, define the memory limits at pod level too. An example configuration is shown below:
+
+```
+memoryHighWatermark.enabled="true"
+memoryHighWatermark.type="relative"
+memoryHighWatermark.value="0.4"
+resources.limits.memory="2Gi"
+```
+
+### Add extra environment variables
+
+In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property.
+
+```yaml
+extraEnvVars:
+  - name: LOG_LEVEL
+    value: error
+```
+
+Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` properties.
+
+### Use plugins
+
+The Bitnami Docker RabbitMQ image ships a set of plugins by default. By default, this chart enables `rabbitmq_management` and `rabbitmq_peer_discovery_k8s` since they are required for RabbitMQ to work on K8s.
+
+To enable extra plugins, set the `extraPlugins` parameter with the list of plugins you want to enable. In addition to this, the `communityPlugins` parameter can be used to specify a list of URLs (separated by spaces) for custom plugins for RabbitMQ.
+
+Refer to the chart documentation for [more information on using RabbitMQ plugins](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/use-plugins/).
+
+### Recover the cluster from complete shutdown
+
+> IMPORTANT: Some of these procedures can lead to data loss. Always make a backup beforehand.
+
+The RabbitMQ cluster is able to support multiple node failures but, in a situation in which all the nodes are brought down at the same time, the cluster might not be able to self-recover.
+
+This happens if the pod management policy of the statefulset is not `Parallel` and the last pod to be running wasn't the first pod of the statefulset. If that happens, update the pod management policy to recover a healthy state:
+
+```console
+$ kubectl delete statefulset STATEFULSET_NAME --cascade=false
+$ helm upgrade RELEASE_NAME bitnami/rabbitmq \
+    --set podManagementPolicy=Parallel \
+    --set replicaCount=NUMBER_OF_REPLICAS \
+    --set auth.password=PASSWORD \
+    --set auth.erlangCookie=ERLANG_COOKIE
+```
+
+For a faster resynchronization of the nodes, you can temporarily disable the readiness probe by setting `readinessProbe.enabled=false`. Bear in mind that the pods will be exposed before they are actually ready to process requests.
+
+If the steps above don't bring the cluster to a healthy state, it could be possible that none of the RabbitMQ nodes think they were the last node to be up during the shutdown.
In those cases, you can force the boot of the nodes by specifying the `clustering.forceBoot=true` parameter (which will execute [`rabbitmqctl force_boot`](https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot) in each pod):
+
+```console
+$ helm upgrade RELEASE_NAME bitnami/rabbitmq \
+    --set podManagementPolicy=Parallel \
+    --set clustering.forceBoot=true \
+    --set replicaCount=NUMBER_OF_REPLICAS \
+    --set auth.password=PASSWORD \
+    --set auth.erlangCookie=ERLANG_COOKIE
+```
+
+More information: [Clustering Guide: Restarting](https://www.rabbitmq.com/clustering.html#restarting).
+
+### Known issues
+
+- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods.
+
+## Persistence
+
+The [Bitnami RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/bitnami/rabbitmq/var/lib/rabbitmq/` path of the container.
+
+The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined.
+
+### Use existing PersistentVolumeClaims
+
+1. Create the PersistentVolume
+1. Create the PersistentVolumeClaim
+1. Install the chart
+
+```bash
+$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/rabbitmq
+```
+
+### Adjust permissions of the persistence volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`.
+
+### Configure the default user/vhost
+
+If you want to create a default user/vhost and set the default permissions, you can use `extraConfiguration`:
+
+```yaml
+auth:
+  username: default-user
+extraConfiguration: |-
+  default_vhost = default-vhost
+  default_permissions.configure = .*
+  default_permissions.read = .*
+  default_permissions.write = .*
+```
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+It's necessary to set the `auth.password` and `auth.erlangCookie` parameters when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section.
Please note down the password and the cookie, and run the command below to upgrade your chart:
+
+```bash
+$ helm upgrade my-release bitnami/rabbitmq --set auth.password=[PASSWORD] --set auth.erlangCookie=[RABBITMQ_ERLANG_COOKIE]
+```
+
+> Note: you need to substitute the placeholders [PASSWORD] and [RABBITMQ_ERLANG_COOKIE] with the values obtained in the installation notes.
+
+### To 8.21.0
+
+This new version of the chart bumps the RabbitMQ version to `3.9.1`. It is considered a minor release, and no breaking changes are expected. Additionally, RabbitMQ `3.9.X` nodes can run alongside `3.8.X` nodes.
+
+See the [Upgrading guide](https://www.rabbitmq.com/upgrade.html) and the [RabbitMQ change log](https://www.rabbitmq.com/changelog.html) for further documentation.
+
+### To 8.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/upgrade-helm3/).
+
+### To 7.0.0
+
+- Several parameters were renamed or disappeared in favor of new ones on this major version:
+  - `replicas` is renamed to `replicaCount`.
+  - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`.
+  - Authentication parameters were reorganized under the `auth.*` parameter:
+    - `rabbitmq.username`, `rabbitmq.password`, and `rabbitmq.erlangCookie` are now `auth.username`, `auth.password`, and `auth.erlangCookie` respectively (see the sketch below).
+    - `rabbitmq.tls.*` parameters are now under `auth.tls.*`.
+  - Parameters prefixed with `rabbitmq.` were renamed removing the prefix. E.g. `rabbitmq.configuration` -> renamed to `configuration`.
+  - `rabbitmq.rabbitmqClusterNodeName` is deprecated.
+  - `rabbitmq.setUlimitNofiles` is deprecated.
+  - `forceBoot.enabled` is renamed to `clustering.forceBoot`.
+  - `loadDefinition.secretName` is renamed to `loadDefinition.existingSecret`.
+  - `metrics.port` is renamed to `service.metricsPort`.
+  - `service.extraContainerPorts` is renamed to `extraContainerPorts`.
+  - `service.nodeTlsPort` is renamed to `service.tlsNodePort`.
+  - `podDisruptionBudget` is deprecated in favor of `pdb.create`, `pdb.minAvailable`, and `pdb.maxUnavailable`.
+  - `rbacEnabled` -> deprecated in favor of `rbac.create`.
+  - New parameters: `serviceAccount.create`, and `serviceAccount.name`.
+  - New parameters: `memoryHighWatermark.enabled`, `memoryHighWatermark.type`, and `memoryHighWatermark.value`.
+- Chart labels and Ingress configuration were adapted to follow the Helm charts best practices.
+- Initialization logic now relies on the container.
+- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade.
+
+Consequences:
+
+- Backwards compatibility is not guaranteed.
+- Compatibility with non-Bitnami images is not guaranteed anymore.
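+
+As an illustration of the renamed authentication parameters (the credential values below are placeholders), a values snippet written for chart 6.x:
+
+```yaml
+rabbitmq:
+  username: user
+  password: secretpassword
+  erlangCookie: secretcookie
+```
+
+would be written for 7.0.0 and later as:
+
+```yaml
+auth:
+  username: user
+  password: secretpassword
+  erlangCookie: secretcookie
+```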
+
+### To 6.0.0
+
+This new version updates the RabbitMQ image to a [new version based on bash instead of node.js](https://github.com/bitnami/bitnami-docker-rabbitmq#3715-r18-3715-ol-7-r19). However, since this Chart overwrites the container's command, the changes to the container shouldn't affect the Chart. To upgrade, it may be needed to enable the `fastBoot` option, as was already the case when upgrading from 5.X to 5.Y.
+
+### To 5.0.0
+
+This major release changes the clustering method from `ip` to `hostname`.
+This change is needed to fix the persistence. The data dir will now depend on the hostname, which is stable, instead of the pod IP, which might change.
+
+> IMPORTANT: Note that if you upgrade from a previous version you will lose your data.
+
+### To 3.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is rabbitmq:
+
+```console
+$ kubectl delete statefulset rabbitmq --cascade=false
+```
+
+## Bitnami Kubernetes Documentation
+
+Bitnami Kubernetes documentation is available at [https://docs.bitnami.com/](https://docs.bitnami.com/). There you can find the following resources:
+
+- [Documentation for RabbitMQ Helm chart](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/)
+- [Get Started with Kubernetes guides](https://docs.bitnami.com/kubernetes/)
+- [Bitnami Helm charts documentation](https://docs.bitnami.com/kubernetes/apps/)
+- [Kubernetes FAQs](https://docs.bitnami.com/kubernetes/faq/)
+- [Kubernetes Developer guides](https://docs.bitnami.com/tutorials/)
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common-1.17.1.tgz b/ansible/roles/helm_install/files/rabbitmq/charts/common-1.17.1.tgz new file mode 100644 index 0000000000000000000000000000000000000000..d2e66eb702c03db2f57932528265520573877492 GIT binary patch literal 14611 zcmV+uIqb$CiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKBhciT9U=zP|%s7t3eb|#`EJ8@<_o4xBenHit%PJHcja%Q{d zwjmOd(53)30LsxMxxf7$yh!k+hb=#{Yko*%5-1c3RfR&KP>9%c%4qLsg18LlXqxLw~UQ>_z_%(7yp(YELF4;(zF0 zyRB;HzL5t}$|RD6QE|`#h>*l%#^(ocOh+6E$+Jji9CuJkK42~gqX#hC?VvQx{~Uya z7vZ22V-azZO8xE-ejr1R_#FO@lPPHSf)V2|<}6DI9fM&!NZ6P}FvRi_V+uneDVh?9 zRCy700NziCFn_-hFW|uT!C>FhhjhS%Yx>Lk2!jz6X7kGZjcq|S& zUE8rv7bZAKG4FIi;%S;7i91~wWi(P0>U3RSJ5xlcM1*3lt5c*p^2&Y$$au0szdELx zZCkL6M_7YfUb853d3hQ7-R{VFiVq+$BiWf%2K=sB_u<*VWA*g;=+o`6mRyq%=crgdAe|c=LhilGlUm-4qw0KUjSH& zIg*Skd)j+#x_RKe0Z_f-h>kIY=5ra6YDYBzU*PwQNnHJ^s$&|v*O!=#C-MLW9rvMI zAIAye@wLWL0l3+5G%ctqZaP|h7!`ywoZv_@{vPwGC3yE+8p{Qvca4F|$4m)(0D+N~ z8yP_}c&1Ne1o(FdRimy=+jISOHLVeP0C6I48LHuzb)cFWC8(C(3uv!3V9H`g|DO6y zC0TzwQWjVAVZ^2>qnOHkEbu4E(T|F0e?UW=hycP5#$d00QMIYWpJV`!Rvsr01=sCI zITCR2QdLD+A|VJMEI#CAYe;xibf0#hj_B1@b3t__)K2gc1=dM>A&t|b$pbyO+tsnq z6-78Lk=Igj2fhF$G*;am^vhyu8JaQ)+jyGFxkt2}YEeizp>hNPO4INn8)8ne#3CfD zmsdJ~??m8JzSf+l->Z7&(FLAAg-2Ry75(djQpM*?NuYsMat=Q!G*x;ga5L z;Bs9s%!?Oftv4iD$`Ur7f3LJ}nLZOH)cORkIjwC@I7^e<-3Vtf3*Me^u0P*)n}u+5 zBKRktGgp?s>q}pQ>|IgvR+hU;lXaQ9p2V#pZwn-BW5RVihKOlt19pfvQ*O=-@g#hi z6|(vDUw!=_p%e{CvOy2P;`RSvu>1U3W&OY3fBx)Y{eK(J71sZ9fLkGn* z&;s`doxy}mqnD&ad`13J@IdnbP;FtH#S_6Xs+~a^43rp zXseOR72O(^ZAftFSVW2G4GtsZg4VjS@;oB=IOq#L8;QuW_5CvR%ImA`v>~ zScnw~?$4x%A*n&?UguK)WMVzsGHw@d{@Sm^UaFaUSh!%dJ+HKEWr>_o=fMXj( zv!Yb-A&S<}H`kxs1LD@pujTU7z((N(#1Li*zkD zH%GO@ljAeYXC%Ta(hjA>Ral*b{Y{XN<1rDEug`z2bvH-Gzbn>eRH6hKt=!zGcE z%8FWnu~dEvmlG0AK#>{|DgaSHajcz_D1qSf*RKIU9^@K!Y_zFDiwM7MBme=&WBf@w zc7aeKF^YkWv>SHRjstxC3PT*BOke|<(6MGu)$a>z0%ToTLgC-{!aX(nWQkN50D^!} ziTMacSW{%gG8zYfIcJKKpS))<#K7^C&2S8t6Kn({LJ}ox{8+%0P%_PE(`Y<-vZ7I) zr77c5c(|72!J|y@d3*MAzx(HZ?mYSV|9<`FldoSN1+ZNr4}1a1&Lmfqu%3DaN3+i& zZo6w=*?-5U?*G+{wXxW*KP->`+8aC@RQ$isUhM5X_=g< zSvuw@R&tyP^{?57OOX&d9NxypfSe!+3ZzzGLu|^0oz71Jjq!omsGOmM#M&5mqZ;V< z;oxO$+oUKYY-CNS8cuMiLFU^@`rG$<7b!(@5=s_2tor=`0)ZorW&YzWn&JS05y45U ztC*tQem^MY=Ng8Co5FCg28M$i!x}19lFcCD9K5Jw;&&_GYHJvgItg~lo7&1IICXKu zu0cos2g{5+rku@4 zjA70)4pGK66$<&S2)VK=xNSUWZ}g=#B}*@ZUno?cw@hNA9PkWNKqHAcaIB0glq9$MMN`evrQ#4l$KrxPuVJ0Vd?%rPDRN0J>DkDWm)tebhvMGirL1Zd6g7#lC zE+?1+p(DnphC3rfRr_qHkMhKTWPYqBW|1hJs~Ka8IZApHcG;DzCppW82^JH^WOss- zsR@$r5+O1ydRs;3yN3O`mlKlUZayxyyd4>Fg)V+qsbu3NO2z67`2|LZB)7h0{qSt? 
zsoQ^KipHCZ{r|&q`|sJa=P#=E-~Nji5BA?}JZsp0p4TPiEL93nu?pB=p_=(&8A<;a zz&03bL;rcom@MMtoc$vw*p!CA?A|+*qHQ)Vw~*Uu*0D5KHj|nG!@SMVeew&ZOc2Rh zio1_R1&PL$QtFMjr1kwd&4COyHo-t}7`foDdKX@{)M$-5@P3m~1(@&b8A0%!=zS*+ zbm&{DSd}ii+wE>64atF7P;nKPo@K7rEV{rPjx|H%eLmCHFeuOhwqu-f93hG01JDkZ z5?^pkW6V#oBr&ywbKC4JVAr%LUSZnYFmC1B!PE9{yE_F}mt#~`Vet}Ho{{OjS4^qV zQ#7sen$LmuV1mz5K8?D06k@G~mw?h#ja%t$*vN?GzdEI+O{YrL?3eVG5FsPj_9I6< zFgtoYs$@B+U|MQex8ja&>w_H?JW6P6j{mV*L?Wq_j$LUhbJ$Jg3z)nn85?Ky%A#-y zdpF>-oXPE1x3E(>+5IjJv@6jbTxN%~vD^j(q_!&{)5N+4OvlSkfdE3Eer}&&=e~&9 zt#{`h3F}>(Ya4$W?0*}&vAG4nQu}|fTRs2VfAQe|y_IL3_}{;2zlFpwMA3zk2u#(K zq#Za!DB~+dk_;1Uw&hTg_yNL)C&zC0N%*ec`W$5$(6*9m6JfTvttZrewSU=fCydMD zoBgZmpFHurksi$45gF>)71#(61^kH!J;B6If618Iiff>r6)`mOz z(}+MRlHHQZ0VGtGq3smkMhQc5|5^3BYfiIjy}>Vv#TsVH#jCm#6H*|#G6QU6fYJlX zWPCqQG3fma#m{z!8OJLq2Dcl1Vxa_P4qwKy^=clYZ8t>7{Auk7nOca<2qgk94%RTl zxmuKk?b=a-gwWdNTJ$`wKwFcp_3DcQkk%*fjZ-%#ZiKn)P7y_8y(3{aw7g_#T6@0# z;S2;BQyh0x1u`O$vY|kAQB9}BEa)s@{OX-kJSx(9`K>8U6|+e6GBs{JOXS{GKA>G6 z%<{oiuzxh5)ii*GEY^A+q!w?S)6-tZ7q90QwCZ*#jjEa;@jaeH33N~;M3iJgVs5rV zxKi^gRm+Qe`syLNr17cS|MoEM?fL%)gPQ&SZ2$T52mAjvo;Cdco-HpVqA}vJIq)r7 z)q&+DSKR1;`XTJx#%Vv#Z0Dj0PNt!lnB0X6s_5^wPQ5+7!Av7-SGnzUXQCpUj$-6G89`xt8 z{?zrqVTui^|E2z){k`W^{r~(S{_l34HT3_RPpQ&S%A%##L81eVb?WF=HRbD=Zc)S@ zh1PYoHgV>sD*0}PIVUlOFUoAA5$x~jpJaNLjYi}X1l_t#Z;(9V7)gtiK~v%EqMng3 zq2|MRfx4N)mUX~RDD_Rx?sv*|_B&#JJ4eJ9;5bcCgdzB20Ku=p)jNf9!i};dnZxfH zO2~+)g-M#~&W4?j*g)6irD{WAF7gm`!KT>v$`z|dvusJolvpQ}oDc!qmQ=6bo~g3h z6PDs=C+r-LTr8LYk_Xv)v?qmwOVeG4(JSOFwu@=53f2~qj!)djoL9P)BZ##EE79Fw zLO!B4B4A~H_3+HeVW5O-`P2Q>%(j zHSV=6LXBUqElQ10H!V_!Ny0AusM;dWlMQobhCDGgN7|-YB1yL>RHOyaXN4j*9qQ;) zmp^~`q-^s5g6TZ}xoCs80;&sdN-ui(*^_svITdR5 zNEJ55?!f*Qu!|zp@4Y({U`%G1x@AQsFt=Iua%;moFsh2UsgY3}f}RGiaUyPqBBruVbUJX$khmaF~Ju5~_#HV~);`ek)~ zv8GPF-kPh$u00Z6<@;iO*O64Uo#$e2&D5v9jpc9-nwdE-P}MsZ zc)pU_(Mcq)p><3W$_uRHn1ct^ayzQ!0?(U|1itlrPO6a87`p0m0oS9MbEyJw@X{oC zF9~jg+>}$z)+DD~qa4;)hp1Zo5v8d+DBwk2DqLOyp)vlnsMez#>Gt5srQ+s5ESI+Y z5GUinvE&5Lu;ML!PBGya#dCLbt@SkIhIv&NB@1XYEkAf8f~ zg}4fbv?EVhRu|&0V?VWk<|2!H72ul=ilh@{D`fa-wcpsvXiijHu(8v7A)IQ_Namij zRLEs}d->H-k2!Wc{GzG5#boi@A3Sv zoK2Nh_pjg)0fZIR?@eIEa;2@=4=#^=Ys0#PYWjQ6z}C}rV;>f_ZieSIl1Q(%62>mF z6oQ$WzMZZ9sL>f~9Eo;(cfweK@XkDXfPE9rhL8E5{fmCe?BI;C=F*=VvPe4{t0CpW z`@+EfCHPMK5k?=V%-yOy%vQPwXo+<%5? 
z66&0NT;Y!(G>b>gL$L2#r>JXEXr5O=#m9nW6FzvWB%5(X&R%k7;%HW?# zp7&%sVV9W0wwfajlXQY$u(Mo80R1A|j-wgWD?L zG5&OFHtG+PX&KW4E5U zrRvSKNdaor{Cm5~Exue_Y2g-$*46b|_R}{0U2g-GitJR}%5Dz#%a}utzXZPoex3#2 zu2@#r7&6V6wf5#=GP!?GUH=!^95yurEYbh{{eCU}t3#?7g`$5SJ*SeUCd82&bqz#>60r2(f%SYQ6*$_uE(Zq3%2QO^*^XKLOWAnA}&e$vrcC8Lw2EPeLm2!L; zeAX`SP?a!;{=a_x8o(DAB7ygxL02cat2$|3Wh99K(;3VVH``|>yabuJ`=ujSp@JDU zHx$!(H{S?eJ{A>Fo*Ptu(IHT2LbTUIs2@5fw6b??hrJrwO?ZR^{%ftO_BM8_xv{Ek z_>)8{v?3i=TZwHj4$BaB8Vq9nXW_eSh0+q2A&aSZ#M#tMmSzN(j@-hHn>E7cpjjpdDC7!+|gR`%?cQ{YwBvypr^{7v7fT-jd8i`mi4TdKwDcyPK68#Sp3 zyXSaDu!+RSR1$8(A=n5Wei+5y-6yO<#m$28drk z<8p`WOpRZ$_h_nUh_JvnYj%4b4ZU68QcMOgZcAE^s1_j|?vnszv};SR!gB4Kl$E%h z3p^i``A|petjmd|_Nvidw)=VmKL5t?40D0MS1%7R6@nV`rg?>555&TxF441!*d^=y8VBNI3;wv!6m>_|8KwM|J{A|aR2wMJZsqh z9}Qyw8nR5<3tu=(5`78_jTf2yIZxU0@6mWc`tMW5 zLhl`$Q?#1B`-|BN{wtv|1YQ2iFYzBAeGwD1`+WZ(FzH4=9-h8Ee*3orI4yb)F_t)z zIEHOyl6#56zkWSXZ&aDDUw58@H92U0_k~I5XSU3fIdH7HiKz{rLo-I=I<@WHV5z>K zOb3QxIwPD>b8JEfXSd%#VQGW8aF;oB??tfm zevOb2h{haUYcg1(|DX2<`xX8FV*la%-|akW=zkjtgyG0>MUfe=xH1TFb4_9Gimz2- zr3!w_0k%1ZT;kU}n9Bw6msbp{g@BY#m)LS9?4#(cOR>(ejs0*j@Hy7G`$Kdswx(Sd zW!4m#9_E4RMN96`91=xc#uZsZcx_gF2d2NRbL6VT_12)3qc06BngW6aSvyu$f@ebj zR{~_pRv^T~p9JP79FNGSmB3u!d1xwCAi0P^KbEjq8x`jlDs7g-+O%20xG@|aRnPtC z47AlVWO|Qv$9=Q}F-a-QCxvQ}eWL2TS|=*9*4@^S$Oh&OmLXR7!Rp_d;~R?&nB_V^ z=Qv1f_vJab4Iz|%Ok!h5;(`ne*(Z_^#OK}t0pruwSsBQ9BAv^P@ydz6h01NIsJP4_ z3)IyesEws)mmF+ifuhGT)llF9rjf1c89V3iu0$d%0=Y7vO%1%2+@dS%jlI)u&4a%B zU^qA{jDqhfY#$NVu?4g*`5cwzYYFvk@_1jD?k*zUjQ3hJ_oo79LFBfAO$&VQrO)rg zk}iig*91uj|tz1ZKa=6`&#_v~T)cPr0T)_*@Lu6X@w z2X#mPfj3h&Q}CE@4pajYiUEl&c)Xtub8F z3b`B@>EEzc!eOT#;J#@mgrjtw$QyM-RO9Xa?wbpQeC-o>vJfmFLW@wfb6-RG6_~K7 zbpvE-!f7$Z{--!{R+#}e=F8cjF|Z2WS8(Pyunq98;>cMM%!b`Kt`lpF`xRX{ZBVZ1 zz;R&jx%TTFDr{s5ykuA*ZZ*o)%^p}jN zuO*vC%zuBkZn!1>A4bRQhW$T-{fbmX|7G(|Aj@+1 z#|h;3w%rqC(7vWG$ZzU~971*SuICW4^*7=WD#L1V2rYKwltHw(ah5ujDxGR~D&04S z&}$^o5Q%F2*uK6khte5#=RE4JB=c=4q{B>3-W0sz!O8ie#SNujziDT)b^Ikh7E9Km zH{ks;J_sL$d+8Rls_uV)Tg+7Oh3q=5=5Urf;}mCAx?I8K=6!ON8SP&Avt!iv+&iAH`ui-|PjVs`N6`z0w`)&K)Y$w(j z_pAEeTA*Cl_h!M~bKl#_9-1}_`X)Uz8(!zM_FmCGM7XS6UcK`2-!QkQi=dCrHH@>E&bKd4WO`Dqdm*aeu6_y72ZTpyX7giqatNNE3VO-a{ zq=DXZ-_puXjCRxQCS4deBVO0`To}3f#^bq~&FF7#G*@>K=GyRn$8lZtS?vEwnUG_S zujvF@68|;Wulj#>`-6w`f4B2o#s70+xZ)<9Kpw;08&6PyQ@cB;D04lZkk{7DIEI=D zyq0&!198nXFb$9zT}1inGB58hzm`K)+TOhtg`;Ch8{S%JL3B}bYmJVTusqq^p*N*o ztdX+q%lvxr_H}K8Y-}jSOdX4|d&N@78_2b|&>zKO^vZsOh9;`xv4Gg?IusT%=Sm}R zv+jk(J-bhihO3#+F^))#^E>XaLQKtw_Oc+0=b|~U&TN^$p}XOzl#}}c&%12Yb>9}v zYps{&>&)Wz-hYR!xq+8Zi?y4=DoVU{Qd!)CvN>H@#I)S0)L6=E$74^!Df))|rL}B| zZr|BZ9`!r(H7%b(*YiXyhv!XnhET27{I0>E)kc;F%r>u-T_W@C>X>@CH2aWt%H3J8 zo^EYpUBDZ5Re4=plVh&tv|0e@nw~3vurA-Y_HXj{21nhW?Ad>Kn?CS&08vlC+hzYqL;ui#MQ2d}H-$s-$f0e#2bc z*q_a5H=odJYPu@aC7?E}@BG%%tE;@mwe2^|F0Fl9!ZIx^kygaq=Z8h*?#6$-*#FCM zOs;v?=l3peBJ ztr2rIx2&nQnn$)Z;I0JZ0?$R5>arC;`pOB!$-i*?0dvvfqALOOs)RFBCop#*wfnH8 zvM~7^wu|65eI-(*A3;+P#m>@N^$Pg+#;!TnV4J(W)vjJ84y@`dZjJ)02MNYR_|C_* zNR_(~mlMe^x-xl6SpFpNVJ0W*kEdnfDQ1Ub>vjjA!nWbPF-l`;{rJJu-IUyumfr#;lO`X}^7#O&x#C7L)#Hf};zO@4{$$ zYy36D$HGxm7<*jzhi zE(}_1FV%p(7W9g9M-|(}<_8(IHSbHTV(%rwc-QNvZ~a-k{`cpyt9t>KtpA_w@Aa$k zU%StrKdk?6rDv`^Zlq097wT6aSB3#nZ?bp2Y<6Q8BISHKuk%#p-#Z}nrA_7?bM?VDn@Zx)#t zNz~j#d)&hcc6~8bnH!_S+&flUFfHdELFl-)Tqt-r%m=!4U1aabZ@6B1HS;#-GI!4xgX6?eqvOK zgPqDsMaEbjWt?MLI2D7S>bOBP&NYYC7#07CYpIQDy3%|0hR!}9?gvS>pi!)0;C-=N zj!Y55+6TYTU#N>l{7(Er8&*1|af7)UIZJ_XrzOy<>;W*~AActv*VS5nK{JYDc--wO zvq&wsUOrOAtrk#NAR32i6;`lDG5X#+N5C2-?#H^u+W+AMCsQ(}jN{F>vBduGKi{q1 
z|GT^Y;>Cmge;dyhoFJ(*l~BBGI6zH16owf|Vx`4X6kVV(7GY-#-cN`Ck)zSJ5I$Ln0Sr$GVok2{v=!9b;d-`94ekUCMi}&=u?!{!>Q~$U>#fX5BYy?<=KK)L{c;* zP8xn7L$0irBQ=qAI*0J{-*GZ6zy9a;M9Neg^m=OA2*soqvq~ZyP8`MEId8WTUY7<48l+0Cl__Y1d)u-55OUA8?kMM5u5hxZ`GJ?afrmE^9U6B zkT*PsELC=DUP|MIQu4vII~#N^2#pUwt>DS{N0fG^SR!RTE0hhw4NE5VL4>0$NlYxH z-qCiorgLK&LGX8+Bn&<>p2UGcVrvVIspKrq^fpANqm{HVL{iRXB*p@eqq+0+seYxH z3%Hz+XaW(U$^_Exe|^wlh7* z)I&2w68+6CmMr&Mvg(kLh^1IBreZQ0VQuIN9GOLnYIX|_MYF9%+^iW%6S=;?xgV1J*LUv%>zfeHRTsQEJPp*p{}t?< z!$MRhh{q*V)Qj6jC1tTAN{uQhiybCC^_xo4gjtSC#HJ~un93ZX!mn~(W|dY{3{h%S zxj9s{KpmkJ4N0=1NHqw{m1J!7r%4)@3*7ZZX%h|jqaAs44wd@0)8KivyLR-IhLnh_ zb-$;M-Hz`mJ?2r+-9eS??AVRO(1(rLZ;*;>)Oa)*W16uKnIHSL@VOY%C z=S_H`H03PiM0-K=qIyh z=1r)sil>^%aFlT(=N8ya;7PH(Wc=c!j_#^>S_tdmxHg_BC1;q=NQAGdmkv*k9fTVk z_eqMb$)A5`ocxPXi4r(rF&t)+i3laj%!hULr}d|))kqmxho>;1FvRSh=rxpHy3C5X za4+2Z<9f7g$5We7Z#ZXsE#v9Nt)@*qYp;b?gSv+OcH@TIrkICBXJ}ab+?kSQ zFOJyOcsxbp z{-t> za~G5QH)Bu~Pe~x{!g`3J3ryqlh9Q8z*>ER7PB1OYgRKRaD#0{+HEP;tMvMa)JJ3f75<<=F=WRqKm4X{3?tc@aMwrVV%?{!z z{yoD&7IsyQsQEfT)#}=fM?jP^X^;9SIeGuX83;0_wj3FeNKeyxXAVoZZRh0Gubzv) z_p-$-bDqVbV1o*~6{2ce;0H9s$xW>m+M2$?md0jht!0}PiKdD>commDR^E1mD*&d# zS~OVf_OTe~peQp&N?n*=AelCt8HID>w+}^%qfp=kN0RY!Ew!dN7qh5b=6sPuM_a0^ znTMO=jH_>RZHhCN@q~%GFp9eYcSUBU9;g?*oS%zSqjm1ofhzHChB+s38PqbKD93%2 zy_82B8-vH~g{fk5_&r038n_rxnrhq@x=~ldK>wSf2$%L74wHmk+9Q@GE;cV`(@HYa z=@>6yl=W!fqvFw^t?z(hZS9#!R!Ed2S_MBA9RR(ztXQt8P`<(S)tZkl4JNSO*NfirtEvxe6#n9|ai&x)Rd|CnRn3!O zm7kiw&a#m{&m2tW`A^>$qMc!HO7;J_z2yK3n~kHvEw%~ntC8;l&-0C(oP74U#RXnt zgYH`7%d-jVW8qD ze9e(q-_qmeVaYfe0yzig4!v@;5QgCak%rqe} zjc~J~^a?a=shkvF|GB;8m2KHp(H!(XHEC%#Wy{@Tt8GK();%_?d_I(Ek|))pG3JI9 z)i04Bk=BPr@+^>_^Uco}gP5=^iNT;?67{%dp5}{+5y!u0 zn95}CDo3S7$N8gkHEXKXh~?U!Bw6k)y4l^LjWPNP?u8pa0l5#__6IO3_4{Bpso8bvi zM!T-G-;KIaeSznLa_6)u-PAp+wLTPQ|F00@fA`^P_jqo$Y zB`G=l&F2-8%-kf?mF%*Wl0${`I;m*R?QHhcZvCoteV#I+yq-U%5Fw#`ZSx|!@u_$8 z&;%oni4kTc7%%u+L6y5w?1SepFCi zzpCVSzxuG&tuu>fw~)F8sZP%e*~ud*oB$b@nO-Xmv5B2m_-Gd#kv2j|0HZd;(wt?6 z0GYsGv9PNqw*zR%bVK@R+4z+*Nr~L2yyp1no0K;-dX3|!vqIhR=qViT(bzRzxO%?! z*m+m(Ze#%KW9QuwdA9@TUH5V;qqt=JDt9PcQTDtWGp;p$^>H)#&+m4~yzI-jBXcW^ zTZDu_H0G!@cI%GYk%O`!y;&c>9M3x*!yJ#>8pmaG9;0J+eVau=dHrewk_U?h(IR`- zQ=h@&b|*jWius5*C3L*p53_>L7d+ABdxg;VWVfqYE2LOLU(=@>c5bIhYVV#bbiD^kwr+G=u z$i>MiAF|EZ)T3iUKXp11jhBHna3mW0U}5bT zu}5=22K-50F~2oIJG37kCYNX~9s?N_cqp1Z)`x`Tq*lm~CO zIrNF3=-BDlJ#oF=y^J^iHete~?ky0M@PQ@S6qh&7pThCT@m|mko`T=ZTb;Em2y9xb zm)-i*=RT>UM*Dde} zTOq&}XbNbgxAabv*KE$Sk}hm`3^K|6jj$N}Z0=r08)NhBjd}rn7^Nvm{+= z$&X-KKBE*q7^zwQNa>? zovTLr{oB24q(8XAbt5a@H?s7KxoclMqQ~ZVq%_iq@kq^_Oyv7LX1X&OG?dQLnKCdo zsthHVOHmp&n^)DI*8pYz!gOx$570^1D?{}%2%c7``7#JJ&p)mdT`{YBwSh-Y)PGeV zDw*Efv(fu6M71F^Y?vAsW}N`(LYJxTlljIz>-61bUhT$BBOioOtUERGsM|%&D8d;OLS$Dq-bGHuDBnAdXlle{vKmQ$b zyg2E#OLnbT0Hwrs$^?_`=yh*|sN$&@rc6_*SnV1|kfQFnpRUYA?RC2FlcIvLgKT!; z^BPXGy|9GV2>ssMvZcEkxvu5%CD^D2-#NPFhGv z&PIm0j;*V~TFx$0Q>Cm4(({f&=<_3n5nPsPD>qLyJQIFPA4?c1#f9h`h!7bTt2b$E zbSp)cPep_&;)LZXP4j#JNR(^G3=7q~Q&>(K5uK}XEYK)=VdCV=vgUWq$c#c;EW*1? 
z3_Fu>Ji^?msh`O6_v@65%@Fst@}J#I;BJHj7CTl- z>?b-lgXj7n8O-(+v-~9RC`%wA^gsla@}$mWv^yvAquUFh$V>I|xs~?GF1ol&~^Oug|mnoWS*2{Fv5fMhB z@uvuwR6d-A=1VxX7UhneqGDI57knwRZ zF{MQBhvTC+Z_nOzch$806HTxX#RNH=o4GME$=J~)DD5%k*l2Y^;gS=nZyOP8Brg%i zotOy8$uN^80y(UR0Kb4z#uS3XGdMmA;ML*T@!8YP$K&^ZfA`aS_;`4FdieJJ@tZSv zcM3=E-o8G5fBf$48NB-|9KQVre1H7*^;5vaIPyNFI-dmNK&E;+4?AZzIgD#O_ugw) zEQugNbey3vhB2FAvq_L*J|)6Tkch^egiMLcPf_bm_zhh=^6)%756_)H|33f#|NrB> J+Z+IZ0RY0N)YJd~ literal 0 HcmV?d00001 diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/.helmignore b/ansible/roles/helm_install/files/rabbitmq/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/Chart.yaml b/ansible/roles/helm_install/files/rabbitmq/charts/common/Chart.yaml new file mode 100644 index 0000000..2c93878 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.13.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 1.13.0 diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/README.md b/ansible/roles/helm_install/files/rabbitmq/charts/common/README.md new file mode 100644 index 0000000..c090f74 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/README.md @@ -0,0 +1,347 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
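+
+For orientation, here is a sketch of how a consuming chart might invoke one of these helpers from a Deployment template; the component name `master` is an illustrative placeholder, not something this chart defines:
+
+```yaml
+# templates/deployment.yaml (sketch)
+spec:
+  template:
+    spec:
+      affinity:
+        podAntiAffinity: {{- include "common.affinities.pods.soft" (dict "component" "master" "context" $) | nindent 10 }}
+```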
+
+### Affinities
+
+| Helper identifier              | Description                                           | Expected Input                                 |
+|--------------------------------|-------------------------------------------------------|------------------------------------------------|
+| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition                 | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition                 | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.pods.soft`  | Return a soft podAffinity/podAntiAffinity definition  | `dict "component" "FOO" "context" $`           |
+| `common.affinities.pods.hard`  | Return a hard podAffinity/podAntiAffinity definition  | `dict "component" "FOO" "context" $`           |
+
+### Capabilities
+
+| Helper identifier                              | Description                                                                                            | Expected Input    |
+|------------------------------------------------|--------------------------------------------------------------------------------------------------------|-------------------|
+| `common.capabilities.kubeVersion`              | Return the target Kubernetes version (using the client default if `.Values.kubeVersion` is not set).    | `.` Chart context |
+| `common.capabilities.cronjob.apiVersion`       | Return the appropriate apiVersion for cronjob.                                                           | `.` Chart context |
+| `common.capabilities.deployment.apiVersion`    | Return the appropriate apiVersion for deployment.                                                        | `.` Chart context |
+| `common.capabilities.statefulset.apiVersion`   | Return the appropriate apiVersion for statefulset.                                                       | `.` Chart context |
+| `common.capabilities.ingress.apiVersion`       | Return the appropriate apiVersion for ingress.                                                           | `.` Chart context |
+| `common.capabilities.rbac.apiVersion`          | Return the appropriate apiVersion for RBAC resources.                                                    | `.` Chart context |
+| `common.capabilities.crd.apiVersion`           | Return the appropriate apiVersion for CRDs.                                                              | `.` Chart context |
+| `common.capabilities.policy.apiVersion`        | Return the appropriate apiVersion for podsecuritypolicy.                                                 | `.` Chart context |
+| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy.                                                     | `.` Chart context |
+| `common.capabilities.apiService.apiVersion`    | Return the appropriate apiVersion for APIService.                                                        | `.` Chart context |
+| `common.capabilities.supportsHelmVersion`      | Returns true if the used Helm version is 3.3+.                                                           | `.` Chart context |
+
+### Errors
+
+| Helper identifier                        | Description                                                                                                                                            | Expected Input                                                                      |
+|------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|
+| `common.errors.upgrade.passwords.empty`  | Ensures that the required passwords are given when a chart is upgraded. If `validationErrors` is not empty, it throws an error and stops the upgrade.  | `dict "validationErrors" (list $validationError00 $validationError01) "context" $`  |
+
+### Images
+
+| Helper identifier     | Description                             | Expected Input                                                                                            |
+|-----------------------|-----------------------------------------|-----------------------------------------------------------------------------------------------------------|
+| `common.images.image` | Return the proper and full image name   | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure.    |
+| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use `common.images.renderPullSecrets` instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` |
+| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` |
+
+### Ingress
+
+| Helper identifier                          | Description                                                                                                             | Expected Input                                                                                                                                                                     |
+|--------------------------------------------|--------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.ingress.backend`                   | Generate a proper Ingress backend entry depending on the API version                                                      | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences |
+| `common.ingress.supportsPathType`          | Prints "true" if the pathType field is supported                                                                           | `.` Chart context                                                                                                                                                                |
+| `common.ingress.supportsIngressClassname`  | Prints "true" if the ingressClassname field is supported                                                                   | `.` Chart context                                                                                                                                                                |
+| `common.ingress.certManagerRequest`        | Prints "true" if the cert-manager annotations required for TLS-signed certificates are set in the Ingress annotations     | `dict "annotations" .Values.path.to.the.ingress.annotations`                                                                                                                     |
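+
+For illustration, a sketch of how `common.ingress.backend` might be consumed in a chart template; the service name `my-svc` and the port are placeholder values, not part of this chart:
+
+```yaml
+# templates/ingress.yaml (sketch)
+spec:
+  rules:
+    - http:
+        paths:
+          - path: /
+            {{- if eq "true" (include "common.ingress.supportsPathType" .) }}
+            pathType: Prefix
+            {{- end }}
+            backend: {{- include "common.ingress.backend" (dict "serviceName" "my-svc" "servicePort" 80 "context" $) | nindent 14 }}
+```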
+
+### Labels
+
+| Helper identifier           | Description                                                                   | Expected Input    |
+|-----------------------------|-------------------------------------------------------------------------------|-------------------|
+| `common.labels.standard`    | Return Kubernetes standard labels                                             | `.` Chart context |
+| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector`   | `.` Chart context |
+
+### Names
+
+| Helper identifier        | Description                                                  | Expected Input    |
+|--------------------------|---------------------------------------------------------------|-------------------|
+| `common.names.name`      | Expand the name of the chart or use `.Values.nameOverride`    | `.` Chart context |
+| `common.names.fullname`  | Create a default fully qualified app name.                    | `.` Chart context |
+| `common.names.namespace` | Allow the release namespace to be overridden                  | `.` Chart context |
+| `common.names.chart`     | Chart name plus version                                       | `.` Chart context |
+
+### Secrets
+
+| Helper identifier                 | Description                                                     | Expected Input                                                                                                                                                                                                                      |
+|-----------------------------------|------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.secrets.name`             | Generate the name of the secret.                                 | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $`, see [ExistingSecret](#existingsecret) for the structure.                                                                  |
+| `common.secrets.key`              | Generate secret key.                                             | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"`, see [ExistingSecret](#existingsecret) for the structure.                                                                                             |
+| `common.secrets.passwords.manage` | Generate a secret password or retrieve one if already created.   | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`; the length, strong and chartName fields are optional. |
+| `common.secrets.exists`           | Returns whether a previously generated secret already exists.    | `dict "secret" "secret-name" "context" $`                                                                                                                                                                                        |
+
+### Storage
+
+| Helper identifier      | Description                       | Expected Input                                                                                                        |
+|------------------------|------------------------------------|-----------------------------------------------------------------------------------------------------------------------|
+| `common.storage.class` | Return the proper Storage Class   | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure.    |
+
+### TplValues
+
+| Helper identifier         | Description                                 | Expected Input                                                                                                                                     |
+|---------------------------|----------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.tplvalues.render` | Renders a value that contains a template    | `dict "value" .Values.path.to.the.Value "context" $`; `value` is the value to be rendered as a template, and `context` is usually the chart context `$` or `.` |
+
+### Utils
+
+| Helper identifier              | Description                                                                                             | Expected Input                                                          |
+|--------------------------------|----------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------|
+| `common.utils.fieldToEnvVar`   | Build an environment variable name given a field.                                                         | `dict "field" "my-password"`                                            |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value.                                                                 | `dict "secret" "secret-name" "field" "secret-value-field" "context" $`  |
+| `common.utils.getValueFromKey` | Gets a value from the `.Values` object given its key path                                                 | `dict "key" "path.to.key" "context" $`                                  |
+| `common.utils.getKeyFromList`  | Returns the first `.Values` key with a defined value, or the first key of the list if none is defined     | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $`          |
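+
+A minimal sketch of `common.tplvalues.render` in a consuming template; the `podAnnotations` value is a hypothetical example that may itself contain template syntax:
+
+```yaml
+# templates/deployment.yaml (sketch)
+metadata:
+  annotations: {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 4 }}
+```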
+
+### Validations
+
+| Helper identifier                                 | Description                                                                                                              | Expected Input                                                                                                                                                                                                                                               |
+|---------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.validations.values.single.empty`          | Validate that a value is not empty.                                                                                        | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $`; secret, field and subchart are optional. If they are given, the helper also generates instructions on how to retrieve the current value. See [ValidateValue](#validatevalue) |
+| `common.validations.values.multiple.empty`        | Validate that multiple values are not empty. It returns a shared error for all the values.                                 | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue)                                                                                                                                        |
+| `common.validations.values.mariadb.passwords`     | This helper ensures that the required MariaDB passwords are not empty. It returns a shared error for all the values.       | `dict "secret" "mariadb-secret" "subchart" "true" "context" $`; the subchart field is optional (true or false) and indicates whether the mariadb chart is used as a subchart.                                                                              |
+| `common.validations.values.postgresql.passwords`  | This helper ensures that the required PostgreSQL passwords are not empty. It returns a shared error for all the values.    | `dict "secret" "postgresql-secret" "subchart" "true" "context" $`; the subchart field is optional (true or false) and indicates whether the postgresql chart is used as a subchart.                                                                        |
+| `common.validations.values.redis.passwords`       | This helper ensures that the required Redis™ passwords are not empty. It returns a shared error for all the values.        | `dict "secret" "redis-secret" "subchart" "true" "context" $`; the subchart field is optional (true or false) and indicates whether the redis chart is used as a subchart.                                                                                  |
+| `common.validations.values.cassandra.passwords`   | This helper ensures that the required Cassandra passwords are not empty. It returns a shared error for all the values.     | `dict "secret" "cassandra-secret" "subchart" "true" "context" $`; the subchart field is optional (true or false) and indicates whether the cassandra chart is used as a subchart.                                                                          |
+| `common.validations.values.mongodb.passwords`     | This helper ensures that the required MongoDB® passwords are not empty. It returns a shared error for all the values.      | `dict "secret" "mongodb-secret" "subchart" "true" "context" $`; the subchart field is optional (true or false) and indicates whether the mongodb chart is used as a subchart.                                                                              |
+
+### Warnings
+
+| Helper identifier            | Description                          | Expected Input                                               |
+|------------------------------|---------------------------------------|---------------------------------------------------------------|
+| `common.warnings.rollingTag` | Warning about using a rolling tag.   | `ImageRoot`, see [ImageRoot](#imageroot) for the structure.   |
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if the image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information in the logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Persistent Volume Storage Class. If set to "-", storageClassName: "" is used, which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
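+
+A minimal sketch of consuming the Persistence structure above with `common.storage.class` in a PVC template; the surrounding resource is illustrative, not from this chart:
+
+```yaml
+# templates/pvc.yaml (sketch)
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: {{ include "common.names.fullname" . }}
+spec:
+  accessModes:
+    - {{ .Values.persistence.accessMode | quote }}
+  resources:
+    requests:
+      storage: {{ .Values.persistence.size | quote }}
+  {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 2 }}
+```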
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their own existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the changes required for the Helm Chart to incorporate the features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues.
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore.
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3.
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_affinities.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_affinities.tpl
new file mode 100644
index 0000000..189ea40
--- /dev/null
+++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_affinities.tpl
@@ -0,0 +1,102 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+  - preference:
+      matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+    weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+  nodeSelectorTerms:
+    - matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+  {{- if eq .type "soft" }}
+    {{- include "common.affinities.nodes.soft" . -}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_capabilities.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..4ec8321 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_capabilities.tpl @@ -0,0 +1,139 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. 
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_errors.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty, no error is thrown.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+    {{- $errorString = print $errorString "\n    Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_images.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_ingress.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..8caf73a --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. 
+ +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_labels.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_names.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_names.tpl new file mode 100644 index 0000000..c8574d1 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_names.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_secrets.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..a53fb44 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. 
+ +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}}
+{{- define "common.secrets.exists" -}}
+{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }}
+{{- if $secret }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_storage.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_storage.tpl
new file mode 100644
index 0000000..60e2a84
--- /dev/null
+++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_storage.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper Storage Class
+{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
+*/}}
+{{- define "common.storage.class" -}}
+
+{{- $storageClass := .persistence.storageClass -}}
+{{- if .global -}}
+  {{- if .global.storageClass -}}
+    {{- $storageClass = .global.storageClass -}}
+  {{- end -}}
+{{- end -}}
+
+{{- if $storageClass -}}
+  {{- if (eq "-" $storageClass) -}}
+    {{- printf "storageClassName: \"\"" -}}
+  {{- else }}
+    {{- printf "storageClassName: %s" $storageClass -}}
+  {{- end -}}
+{{- end -}}
+
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_tplvalues.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_tplvalues.tpl
new file mode 100644
index 0000000..2db1668
--- /dev/null
+++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_tplvalues.tpl
@@ -0,0 +1,13 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains a template.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+  {{- if typeIs "string" .value }}
+    {{- tpl .value .context }}
+  {{- else }}
+    {{- tpl (.value | toYaml) .context }}
+  {{- end }}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_utils.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_utils.tpl
new file mode 100644
index 0000000..ea083a2
--- /dev/null
+++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_utils.tpl
@@ -0,0 +1,62 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Print instructions to get a secret value.
+Usage:
+{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
+*/}}
+{{- define "common.utils.secret.getvalue" -}}
+{{- $varname := include "common.utils.fieldToEnvVar" . -}}
+export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode)
+{{- end -}}
+
+{{/*
+Build env var name given a field
+Usage:
+{{ include "common.utils.fieldToEnvVar" (dict "field" "my-password") }}
+*/}}
+{{- define "common.utils.fieldToEnvVar" -}}
+  {{- $fieldNameSplit := splitList "-" .field -}}
+  {{- $upperCaseFieldNameSplit := list -}}
+
+  {{- range $fieldNameSplit -}}
+    {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
+  {{- end -}}
+
+  {{ join "_" $upperCaseFieldNameSplit }}
+{{- end -}}
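+
+{{/*
+Illustrative example (not part of the upstream file): the field name is split on '-'
+and upper-cased, so
+{{ include "common.utils.fieldToEnvVar" (dict "field" "rabbitmq-erlang-cookie") }}
+renders as RABBITMQ_ERLANG_COOKIE.
+*/}}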
+{{/*
+Gets a value from .Values given its key path
+Usage:
+{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
+*/}}
+{{- define "common.utils.getValueFromKey" -}}
+{{- $splitKey := splitList "." .key -}}
+{{- $value := "" -}}
+{{- $latestObj := $.context.Values -}}
+{{- range $splitKey -}}
+  {{- if not $latestObj -}}
+    {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
+  {{- end -}}
+  {{- $value = ( index $latestObj . ) -}}
+  {{- $latestObj = $value -}}
+{{- end -}}
+{{- printf "%v" (default "" $value) -}}
+{{- end -}}
+
+{{/*
+Returns the first .Values key with a defined value, or the first key of the list if none are defined
+Usage:
+{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
+*/}}
+{{- define "common.utils.getKeyFromList" -}}
+{{- $key := first .keys -}}
+{{- $reverseKeys := reverse .keys }}
+{{- range $reverseKeys }}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
+  {{- if $value -}}
+    {{- $key = . }}
+  {{- end -}}
+{{- end -}}
+{{- printf "%s" $key -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_warnings.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_warnings.tpl
new file mode 100644
index 0000000..ae10fa4
--- /dev/null
+++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/_warnings.tpl
@@ -0,0 +1,14 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Warning about using rolling tag.
+Usage:
+{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
+*/}}
+{{- define "common.warnings.rollingTag" -}}
+
+{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_cassandra.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_cassandra.tpl
new file mode 100644
index 0000000..ded1ae3
--- /dev/null
+++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_cassandra.tpl
@@ -0,0 +1,72 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Cassandra required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret"
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.cassandra.passwords" -}}
+  {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}}
+  {{- $enabled := include "common.cassandra.values.enabled" . -}}
+  {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_mariadb.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_mongodb.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a071ea4 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mongodb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mongodb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_postgresql.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_postgresql.tpl
new file mode 100644
index 0000000..164ec0d
--- /dev/null
+++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_postgresql.tpl
@@ -0,0 +1,129 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+  {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+  {{- $enabled := include "common.postgresql.values.enabled" . -}}
+  {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+  {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+    {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+    {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+    {{- if (eq $enabledReplication "true") -}}
+      {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether to evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+  - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+  {{- if .context.Values.global -}}
+    {{- if .context.Values.global.postgresql -}}
+      {{- index .context.Values.global.postgresql .key | quote -}}
+    {{- end -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+  {{- if .subchart -}}
+    {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+  {{- else -}}
+    {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
+
+Usage:
+{{ include "common.postgresql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key postgressPassword.
+
+Usage:
+{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.postgressPassword" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}}
+
+  {{- if not $globalValue -}}
+    {{- if .subchart -}}
+      postgresql.postgresqlPassword
+    {{- else -}}
+      postgresqlPassword
+    {{- end -}}
+  {{- else -}}
+    global.postgresql.postgresqlPassword
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled.replication.
+
+Usage:
+{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
*/}}
+{{- define "common.postgresql.values.enabled.replication" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.replication.enabled -}}
+  {{- else -}}
+    {{- printf "%v" .context.Values.replication.enabled -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key replication.password.
+
+Usage:
+{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_redis.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..5d72959 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_validations.tpl b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 0000000..9a814cf
--- /dev/null
+++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n        %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/rabbitmq/charts/common/values.yaml b/ansible/roles/helm_install/files/rabbitmq/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/ansible/roles/helm_install/files/rabbitmq/ci/default-values.yaml b/ansible/roles/helm_install/files/rabbitmq/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/ansible/roles/helm_install/files/rabbitmq/ci/tolerations-values.yaml b/ansible/roles/helm_install/files/rabbitmq/ci/tolerations-values.yaml new file mode 100644 index 0000000..de92d88 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/ci/tolerations-values.yaml @@ -0,0 +1,4 @@ +tolerations: + - key: foo + operator: "Equal" + value: bar diff --git a/ansible/roles/helm_install/files/rabbitmq/override-values.yaml b/ansible/roles/helm_install/files/rabbitmq/override-values.yaml new file mode 100644 index 0000000..9c30390 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/override-values.yaml @@ -0,0 +1,31 @@ +auth: + username: root + password: "root" +extraPlugins: "rabbitmq_auth_backend_ldap rabbitmq_stomp rabbitmq_web_stomp" +extraContainerPorts: + - name: stomp-ws + containerPort: 15674 + - name: stomp-tcp + containerPort: 61613 +#nodeSelector: {"datasaker/group": "data"} +tolerations: +- key: "dev/data-kafka" + operator: "Exists" +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-kafka +service: + type: NodePort + extraPorts: + - name: stomp-ws + port: 15674 + targetPort: stomp-ws + - name: stomp-tcp + port: 61613 + targetPort: stomp-tcp diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/NOTES.txt b/ansible/roles/helm_install/files/rabbitmq/templates/NOTES.txt new file mode 100644 index 0000000..01fc6fc --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/NOTES.txt @@ -0,0 +1,172 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- $servicePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.port .Values.service.tlsPort -}} +{{- $serviceNodePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.nodePort .Values.service.tlsNodePort -}} +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
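+In this mode the containers start with a no-op command (typically 'sleep infinity'
+in Bitnami charts), so the pods stay up for interactive debugging.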
All probes have been disabled and the command has been overwritten with:
+
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+  kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing
+
+  kubectl exec --namespace {{ .Release.Namespace }} -ti <pod-name> -- bash
+
+In order to replicate the container startup scripts execute this command:
+
+  /opt/bitnami/scripts/rabbitmq/entrypoint.sh /opt/bitnami/scripts/rabbitmq/run.sh
+
+{{- else }}
+
+Credentials:
+
+{{- if not .Values.loadDefinition.enabled }}
+    echo "Username : {{ .Values.auth.username }}"
+    echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretPasswordName" . }} -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)"
+{{- end }}
+    echo "Erlang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretErlangName" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode)"
+
+Note that the credentials are saved in persistent volume claims and will not be changed upon upgrade or reinstallation unless the persistent volume claim has been deleted. If this is not the first installation of this chart, the credentials may not be valid.
+This is applicable when no passwords are set and therefore the random password is autogenerated. In case of using a fixed password, you should specify it when upgrading.
+More information about the credentials may be found at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases.
+
+RabbitMQ can be accessed within the cluster on port {{ $serviceNodePort }} at {{ include "rabbitmq.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clustering.k8s_domain }}
+
+To access from outside the cluster, perform the following steps:
+
+{{- if .Values.ingress.enabled }}
+{{- if contains "NodePort" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+1. Obtain the NodePort IP and ports:
+
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }})
+  echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/"
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+1. Obtain the LoadBalancer IP:
+
+NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+      Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+  echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/"
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+1. Create a port-forward to the AMQP port:
+
+  kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} &
+  echo "URL : amqp://127.0.0.1:{{ $servicePort }}/"
+
+{{- end }}
+
+2. Access RabbitMQ using the obtained URL.
+
+To Access the RabbitMQ Management interface:
+
+1. Get the RabbitMQ Management URL and associate its hostname to your cluster external IP:
+
+   export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on other K8s clusters
+   echo "RabbitMQ Management: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/"
+   echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts
+
+2. Open a browser and access RabbitMQ Management using the obtained URL.
+
+{{- else }}
+{{- if contains "NodePort" .Values.service.type }}
+
+Obtain the NodePort IP and ports:
+
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }})
+  export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[3].nodePort}" services {{ include "rabbitmq.fullname" . }})
+
+To Access the RabbitMQ AMQP port:
+
+  echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/"
+
+To Access the RabbitMQ Management interface:
+
+  echo "URL : http://$NODE_IP:$NODE_PORT_STATS/"
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+Obtain the LoadBalancer IP:
+
+NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+      Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+
+To Access the RabbitMQ AMQP port:
+
+  echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/"
+
+To Access the RabbitMQ Management interface:
+
+  echo "URL : http://$SERVICE_IP:{{ .Values.service.managerPort }}/"
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+  echo "URL : amqp://127.0.0.1:{{ $servicePort }}/"
+  kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }}
+
+To Access the RabbitMQ Management interface:
+
+  echo "URL : http://127.0.0.1:{{ .Values.service.managerPort }}/"
+  kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.managerPort }}:{{ .Values.service.managerPort }}
+
+{{- end }}
+{{- end }}
+
+{{- if .Values.metrics.enabled }}
+
+To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus URL by running:
+
+  kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.metricsPort }}:{{ .Values.service.metricsPort }} &
+  echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.service.metricsPort }}/metrics"
+
+Then, open the obtained URL in a browser.
+
+{{- end }}
+
+{{- include "common.warnings.rollingTag" .Values.image }}
+{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
+{{- include "rabbitmq.validateValues" . -}}
+
+{{- $requiredPassword := list -}}
+{{- $secretNameRabbitmq := include "rabbitmq.secretPasswordName" . 
-}} + +{{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) -}} + {{- $requiredRabbitmqPassword := dict "valueKey" "auth.password" "secret" $secretNameRabbitmq "field" "rabbitmq-password" -}} + {{- $requiredPassword = append $requiredPassword $requiredRabbitmqPassword -}} +{{- end -}} + +{{- if not .Values.auth.existingErlangSecret -}} + {{- $requiredErlangPassword := dict "valueKey" "auth.erlangCookie" "secret" $secretNameRabbitmq "field" "rabbitmq-erlang-cookie" -}} + {{- $requiredPassword = append $requiredPassword $requiredErlangPassword -}} +{{- end -}} + +{{- $requiredRabbitmqPasswordErrors := include "common.validations.values.multiple.empty" (dict "required" $requiredPassword "context" $) -}} + +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $requiredRabbitmqPasswordErrors) "context" $) -}} + +{{- end }} diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/_helpers.tpl b/ansible/roles/helm_install/files/rabbitmq/templates/_helpers.tpl new file mode 100644 index 0000000..c32ecb7 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/_helpers.tpl @@ -0,0 +1,247 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "rabbitmq.name" -}} +{{- include "common.names.name" . -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "rabbitmq.fullname" -}} +{{- include "common.names.fullname" . -}} +{{- end -}} + +{{/* +Return the proper RabbitMQ image name +*/}} +{{- define "rabbitmq.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "rabbitmq.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "rabbitmq.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return podAnnotations +*/}} +{{- define "rabbitmq.podAnnotations" -}} +{{- if .Values.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }} +{{- end }} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "rabbitmq.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "rabbitmq.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "rabbitmq.secretPasswordName" -}} + {{- if .Values.auth.existingPasswordSecret -}} + {{- printf "%s" (tpl .Values.auth.existingPasswordSecret $) -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the erlang secret. 
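+(Every node in a RabbitMQ cluster must share the same Erlang cookie; this chart
+stores it under the 'rabbitmq-erlang-cookie' key of the secret.)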
+*/}}
+{{- define "rabbitmq.secretErlangName" -}}
+    {{- if .Values.auth.existingErlangSecret -}}
+        {{- printf "%s" (tpl .Values.auth.existingErlangSecret $) -}}
+    {{- else -}}
+        {{- printf "%s" (include "rabbitmq.fullname" .) -}}
+    {{- end -}}
+{{- end -}}
+
+{{/*
+Get the TLS secret.
+*/}}
+{{- define "rabbitmq.tlsSecretName" -}}
+    {{- if .Values.auth.tls.existingSecret -}}
+        {{- printf "%s" (tpl .Values.auth.tls.existingSecret $) -}}
+    {{- else -}}
+        {{- printf "%s-certs" (include "rabbitmq.fullname" .) -}}
+    {{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS credentials secret object should be created
+*/}}
+{{- define "rabbitmq.createTlsSecret" -}}
+{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.existingSecret) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper RabbitMQ plugin list
+*/}}
+{{- define "rabbitmq.plugins" -}}
+{{- $plugins := .Values.plugins -}}
+{{- if .Values.extraPlugins -}}
+{{- $plugins = printf "%s %s" $plugins .Values.extraPlugins -}}
+{{- end -}}
+{{- if .Values.metrics.enabled -}}
+{{- $plugins = printf "%s %s" $plugins .Values.metrics.plugins -}}
+{{- end -}}
+{{- printf "%s" $plugins | replace " " ", " -}}
+{{- end -}}
+
+{{/*
+Return the number of bytes given a value
+following a base 2 or base 10 number system.
+Usage:
+{{ include "rabbitmq.toBytes" .Values.path.to.the.Value }}
+*/}}
+{{- define "rabbitmq.toBytes" -}}
+{{- $value := int (regexReplaceAll "([0-9]+).*" . "${1}") }}
+{{- $unit := regexReplaceAll "[0-9]+(.*)" . "${1}" }}
+{{- if eq $unit "Ki" }}
+    {{- mul $value 1024 }}
+{{- else if eq $unit "Mi" }}
+    {{- mul $value 1024 1024 }}
+{{- else if eq $unit "Gi" }}
+    {{- mul $value 1024 1024 1024 }}
+{{- else if eq $unit "Ti" }}
+    {{- mul $value 1024 1024 1024 1024 }}
+{{- else if eq $unit "Pi" }}
+    {{- mul $value 1024 1024 1024 1024 1024 }}
+{{- else if eq $unit "Ei" }}
+    {{- mul $value 1024 1024 1024 1024 1024 1024 }}
+{{- else if eq $unit "K" }}
+    {{- mul $value 1000 }}
+{{- else if eq $unit "M" }}
+    {{- mul $value 1000 1000 }}
+{{- else if eq $unit "G" }}
+    {{- mul $value 1000 1000 1000 }}
+{{- else if eq $unit "T" }}
+    {{- mul $value 1000 1000 1000 1000 }}
+{{- else if eq $unit "P" }}
+    {{- mul $value 1000 1000 1000 1000 1000 }}
+{{- else if eq $unit "E" }}
+    {{- mul $value 1000 1000 1000 1000 1000 1000 }}
+{{- end }}
+{{- end -}}
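+
+{{/*
+Illustrative example (not part of the upstream helper): "2Gi" yields
+2 * 1024 * 1024 * 1024 = 2147483648, while the base-10 value "512M" yields
+512 * 1000 * 1000 = 512000000.
+*/}}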
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "rabbitmq.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) -}}
+{{- $messages := append $messages (include "rabbitmq.validateValues.memoryHighWatermark" .) -}}
+{{- $messages := append $messages (include "rabbitmq.validateValues.ingress.tls" .) -}}
+{{- $messages := append $messages (include "rabbitmq.validateValues.auth.tls" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of rabbitmq - LDAP support
+*/}}
+{{- define "rabbitmq.validateValues.ldap" -}}
+{{- if .Values.ldap.enabled }}
+{{- $serversListLength := len .Values.ldap.servers }}
+{{- if or (not (gt $serversListLength 0)) (not (and .Values.ldap.port .Values.ldap.user_dn_pattern)) }}
+rabbitmq: LDAP
+    Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.servers",
+    "ldap.port", and "ldap.user_dn_pattern" are mandatory. Please provide them:
+
+    $ helm install {{ .Release.Name }} bitnami/rabbitmq \
+      --set ldap.enabled=true \
+      --set ldap.servers[0]="my-ldap-server" \
+      --set ldap.port="389" \
+      --set ldap.user_dn_pattern="cn=${username},dc=example,dc=org"
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of rabbitmq - Memory high watermark
+*/}}
+{{- define "rabbitmq.validateValues.memoryHighWatermark" -}}
+{{- if and (not (eq .Values.memoryHighWatermark.type "absolute")) (not (eq .Values.memoryHighWatermark.type "relative")) }}
+rabbitmq: memoryHighWatermark.type
+    Invalid Memory high watermark type. Valid values are "absolute" and
+    "relative". Please set a valid mode (--set memoryHighWatermark.type="xxxx")
+{{- else if and .Values.memoryHighWatermark.enabled (not .Values.resources.limits.memory) (eq .Values.memoryHighWatermark.type "relative") }}
+rabbitmq: memoryHighWatermark
+    You enabled configuring memory high watermark using a relative limit. However,
+    no memory limits were defined at POD level. Define your POD limits as shown below:
+
+    $ helm install {{ .Release.Name }} bitnami/rabbitmq \
+      --set memoryHighWatermark.enabled=true \
+      --set memoryHighWatermark.type="relative" \
+      --set memoryHighWatermark.value="0.4" \
+      --set resources.limits.memory="2Gi"
+
+    Alternatively, use an absolute value for the memory high watermark:
+
+    $ helm install {{ .Release.Name }} bitnami/rabbitmq \
+      --set memoryHighWatermark.enabled=true \
+      --set memoryHighWatermark.type="absolute" \
+      --set memoryHighWatermark.value="512MB"
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of rabbitmq - TLS configuration for Ingress
+*/}}
+{{- define "rabbitmq.validateValues.ingress.tls" -}}
+{{- if and .Values.ingress.enabled .Values.ingress.tls (not (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations ))) (not .Values.ingress.selfSigned) (empty .Values.ingress.extraTls) }}
+rabbitmq: ingress.tls
+    You enabled the TLS configuration for the default ingress hostname but
+    you did not enable any of the available mechanisms to create the TLS secret
+    to be used by the Ingress Controller.
+    Please use any of these alternatives:
+      - Use the `ingress.extraTls` and `ingress.secrets` parameters to provide your custom TLS certificates.
+      - Rely on cert-manager to create it by setting the corresponding annotations
+      - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of RabbitMQ - Auth TLS enabled
+*/}}
+{{- define "rabbitmq.validateValues.auth.tls" -}}
+{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.autoGenerated) (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.caCertificate) (not .Values.auth.tls.serverCertificate) (not .Values.auth.tls.serverKey) }}
+rabbitmq: auth.tls
+    You enabled TLS for RabbitMQ but you did not enable any of the available mechanisms to create the TLS secret.
+    Please use any of these alternatives:
+      - Provide an existing secret containing the TLS certificates using `auth.tls.existingSecret`
+      - Provide the plain text certificates using `auth.tls.caCertificate`, `auth.tls.serverCertificate` and `auth.tls.serverKey`.
+      - Enable auto-generated certificates using `auth.tls.autoGenerated`.
+{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/configuration.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/configuration.yaml new file mode 100644 index 0000000..1888d83 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/configuration.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "rabbitmq.fullname" . }}-config + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + rabbitmq.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | nindent 4 }} + {{- if .Values.advancedConfiguration }} + advanced.config: |- + {{- include "common.tplvalues.render" (dict "value" .Values.advancedConfiguration "context" $) | nindent 4 }} + {{- end }} diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/extra-list.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/ingress.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/ingress.yaml new file mode 100644 index 0000000..09ba870 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/ingress.yaml @@ -0,0 +1,60 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.ingress.certManager }} + kubernetes.io/tls-acme: "true" + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.ingress.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.hostname "context" $ ) }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" .Values.service.managerPortName "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" .Values.service.managerPortName "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.ingress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/networkpolicy.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/networkpolicy.yaml new file mode 100644 index 0000000..158aeaa --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/networkpolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: 4369 # EPMD + - port: {{ .Values.service.port }} + - port: {{ .Values.service.tlsPort }} + - port: {{ .Values.service.distPort }} + - port: {{ .Values.service.managerPort }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "rabbitmq.fullname" . }}-client: "true" + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- if .Values.networkPolicy.additionalRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.additionalRules "context" $) | nindent 8 }} + {{- end }} + {{- end }} + # Allow prometheus scrapes + - ports: + - port: {{ .Values.service.metricsPort }} +{{- end }} diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/pdb.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/pdb.yaml new file mode 100644 index 0000000..84c69b3 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/pdb.yaml @@ -0,0 +1,20 @@ +{{- if .Values.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "rabbitmq.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/prometheusrule.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/prometheusrule.yaml new file mode 100644 index 0000000..a1ba629 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "rabbitmq.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + {{- with .Values.metrics.prometheusRule.rules }} + - name: {{ template "rabbitmq.name" $ }} + rules: {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/role.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/role.yaml new file mode 100644 index 0000000..9bd029e --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/role.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] +{{- end }} diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/rolebinding.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/rolebinding.yaml new file mode 100644 index 0000000..74f82f0 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "rabbitmq.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader +{{- end }} diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/secrets.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/secrets.yaml new file mode 100644 index 0000000..3e810cc --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/secrets.yaml @@ -0,0 +1,46 @@ +{{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if not .Values.auth.existingPasswordSecret }} + {{- if .Values.auth.password }} + rabbitmq-password: {{ .Values.auth.password | b64enc | quote }} + {{- else }} + rabbitmq-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if not .Values.auth.existingErlangSecret }} + {{- if .Values.auth.erlangCookie }} + rabbitmq-erlang-cookie: {{ .Values.auth.erlangCookie | b64enc | quote }} + {{- else }} + rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- $extraSecretsPrependReleaseName := .Values.extraSecretsPrependReleaseName }} +{{- range $key, $value := .Values.extraSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + {{- if $extraSecretsPrependReleaseName }} + name: {{ $.Release.Name }}-{{ $key }} + {{- else }} + name: {{ $key }} + {{- end }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +stringData: {{- include "common.tplvalues.render" (dict "value" $value "context" $) | nindent 2 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/serviceaccount.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/serviceaccount.yaml new file mode 100644 index 0000000..339bc2e --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "rabbitmq.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +secrets: + - name: {{ include "rabbitmq.fullname" . 
}} +{{- end }} + diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/servicemonitor.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/servicemonitor.yaml new file mode 100644 index 0000000..3d7e9b8 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/servicemonitor.yaml @@ -0,0 +1,54 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "rabbitmq.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabelings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- else if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.path }} + path: {{ .Values.metrics.serviceMonitor.path }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + {{- with .Values.metrics.serviceMonitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/statefulset.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/statefulset.yaml new file mode 100644 index 0000000..435bfcf --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/statefulset.yaml @@ -0,0 +1,375 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.statefulsetLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.statefulsetLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "rabbitmq.fullname" . 
}}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.updateStrategyType }} + {{- if (eq "OnDelete" .Values.updateStrategyType) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/configuration.yaml") . | sha256sum }} + {{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) .Values.extraSecrets }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or .Values.podAnnotations .Values.metrics.enabled }} + {{- include "rabbitmq.podAnnotations" . | nindent 8 }} + {{- end }} + spec: + {{- include "rabbitmq.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "rabbitmq.serviceAccountName" . }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- if or (.Values.initContainers) (and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext) }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext }} + - name: volume-permissions + image: {{ include "rabbitmq.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "{{ .Values.persistence.mountPath }}" + chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "{{ .Values.persistence.mountPath }}" + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: rabbitmq + image: {{ template "rabbitmq.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext }} + securityContext: {{- toYaml .Values.containerSecurityContext | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "{{ template "rabbitmq.fullname" . 
}}-headless" + - name: K8S_ADDRESS_TYPE + value: {{ .Values.clustering.addressType }} + - name: RABBITMQ_FORCE_BOOT + value: {{ ternary "yes" "no" .Values.clustering.forceBoot | quote }} + {{- if (eq "hostname" .Values.clustering.addressType) }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- else }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME)" + {{- end }} + - name: RABBITMQ_MNESIA_DIR + value: "{{ .Values.persistence.mountPath }}/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: RABBITMQ_LDAP_TLS + value: {{ ternary "yes" "no" .Values.ldap.tls.enabled | quote }} + - name: RABBITMQ_LDAP_SERVERS + value: {{ .Values.ldap.servers | join "," | quote }} + - name: RABBITMQ_LDAP_SERVERS_PORT + value: {{ .Values.ldap.port | quote }} + - name: RABBITMQ_LDAP_USER_DN_PATTERN + value: {{ .Values.ldap.user_dn_pattern }} + {{- end }} + - name: RABBITMQ_LOGS + value: {{ .Values.logs | quote }} + - name: RABBITMQ_ULIMIT_NOFILES + value: {{ .Values.ulimitNofiles | quote }} + {{- if and .Values.maxAvailableSchedulers }} + - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS + value: {{ printf "+S %s:%s" (toString .Values.maxAvailableSchedulers) (toString .Values.onlineSchedulers) -}} + {{- end }} + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretErlangName" . }} + key: rabbitmq-erlang-cookie + {{- if and .Values.clustering.rebalance (gt (.Values.replicaCount | int) 1) }} + - name: RABBITMQ_CLUSTER_REBALANCE + value: "true" + {{- end }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: {{ ternary "yes" "no" .Values.loadDefinition.enabled | quote }} + - name: RABBITMQ_DEFINITIONS_FILE + value: {{ .Values.loadDefinition.file | quote }} + - name: RABBITMQ_SECURE_PASSWORD + value: "yes" + - name: RABBITMQ_USERNAME + value: {{ .Values.auth.username | quote }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . }} + key: rabbitmq-password + - name: RABBITMQ_PLUGINS + value: {{ include "rabbitmq.plugins" . | quote }} + {{- if .Values.communityPlugins }} + - name: RABBITMQ_COMMUNITY_PLUGINS + value: {{ .Values.communityPlugins | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: amqp + containerPort: 5672 + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: amqp-ssl + containerPort: {{ .Values.service.tlsPort }} + {{- end }} + - name: dist + containerPort: 25672 + - name: stats + containerPort: 15672 + - name: epmd + containerPort: 4369 + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: 9419 + {{- end }} + {{- if .Values.extraContainerPorts }} + {{- toYaml .Values.extraContainerPorts | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q ping + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t {{ .Values.terminationGracePeriodSeconds | quote }} -d {{ ternary "true" "false" .Values.image.debug | quote }} + else + rabbitmqctl stop_app + fi + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: certs + mountPath: /opt/bitnami/rabbitmq/certs + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + mountPath: /app + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.persistence.volumes }} + {{- toYaml .Values.persistence.volumes | nindent 8 }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: certs + secret: + secretName: {{ template 
"rabbitmq.tlsSecretName" . }} + items: + - key: {{ ternary "tls.crt" "ca.crt" .Values.auth.tls.existingSecretFullChain }} + path: ca_certificate.pem + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + {{- end }} + - name: configuration + configMap: + name: {{ template "rabbitmq.fullname" . }}-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf + {{- if .Values.advancedConfiguration }} + - key: advanced.config + path: advanced.config + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + secret: + secretName: {{ tpl .Values.loadDefinition.existingSecret . | quote }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if not (contains "data" (quote .Values.persistence.volumes)) }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + {{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{- include "common.labels.matchLabels" . | nindent 10 }} + {{- if .Values.persistence.annotations }} + annotations: + {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} + {{- end }} diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/svc-headless.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/svc-headless.yaml new file mode 100644 index 0000000..5168b41 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/svc-headless.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "rabbitmq.fullname" . }}-headless + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if or (.Values.service.annotationsHeadless) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotationsHeadless }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotationsHeadless "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + clusterIP: None + ports: + - name: {{ .Values.service.epmdPortName }} + port: 4369 + targetPort: epmd + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: amqp + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.portName }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.tlsPortName }} + port: {{ .Values.service.tlsPort }} + targetPort: amqp-tls + {{- end }} + - name: {{ .Values.service.distPortName }} + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.managerPortName }} + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + publishNotReadyAddresses: true diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/svc.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/svc.yaml new file mode 100644 index 0000000..381ddf0 --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/templates/svc.yaml @@ -0,0 +1,99 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.service.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.labels "context" $) | nindent 4 }} + {{- end }} + {{- if or (.Values.service.annotations) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if not (empty .Values.service.loadBalancerIP) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + targetPort: amqp + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ 
.Values.service.tlsPortName }} + port: {{ .Values.service.tlsPort }} + targetPort: amqp-ssl + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.tlsNodePort)) }} + nodePort: {{ .Values.service.tlsNodePort }} + {{- end }} + {{- end }} + {{- if .Values.service.epmdPortEnabled }} + - name: {{ .Values.service.epmdPortName }} + port: 4369 + targetPort: epmd + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.epmdNodePort)) }} + nodePort: {{ .Values.service.epmdNodePort }} + {{- end }} + {{- end }} + {{- if .Values.service.distPortEnabled }} + - name: {{ .Values.service.distPortName }} + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.distNodePort)) }} + nodePort: {{ .Values.service.distNodePort }} + {{- end }} + {{- end }} + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.managerPortName }} + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.managerNodePort)) }} + nodePort: {{ .Values.service.managerNodePort }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: {{ .Values.service.metricsPortName }} + port: {{ .Values.service.metricsPort }} + targetPort: metrics + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.metricsNodePort)) }} + nodePort: {{ .Values.service.metricsNodePort }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" . 
| nindent 4 }}
diff --git a/ansible/roles/helm_install/files/rabbitmq/templates/tls-secrets.yaml b/ansible/roles/helm_install/files/rabbitmq/templates/tls-secrets.yaml
new file mode 100644
index 0000000..08c8a54
--- /dev/null
+++ b/ansible/roles/helm_install/files/rabbitmq/templates/tls-secrets.yaml
@@ -0,0 +1,74 @@
+{{- if .Values.ingress.enabled }}
+{{- if .Values.ingress.secrets }}
+{{- range .Values.ingress.secrets }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ .name }}
+  namespace: {{ $.Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" $ | nindent 4 }}
+  {{- if $.Values.commonLabels }}
+  {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- end }}
+  {{- if $.Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: kubernetes.io/tls
+data:
+  tls.crt: {{ .certificate | b64enc }}
+  tls.key: {{ .key | b64enc }}
+---
+{{- end }}
+{{- end }}
+{{- if and .Values.ingress.tls .Values.ingress.selfSigned }}
+{{- $ca := genCA "rabbitmq-ca" 365 }}
+{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-tls" .Values.ingress.hostname }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+  {{- if .Values.commonLabels }}
+  {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: kubernetes.io/tls
+data:
+  tls.crt: {{ $cert.Cert | b64enc | quote }}
+  tls.key: {{ $cert.Key | b64enc | quote }}
+  ca.crt: {{ $ca.Cert | b64enc | quote }}
+---
+{{- end }}
+{{- end }}
+{{- if (include "rabbitmq.createTlsSecret" . ) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "rabbitmq.fullname" . }}-certs
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: kubernetes.io/tls
+data:
+  {{- if or (not .Values.auth.tls.autoGenerated ) (and .Values.auth.tls.caCertificate .Values.auth.tls.serverCertificate .Values.auth.tls.serverKey) }}
+  ca.crt: {{ required "A valid .Values.auth.tls.caCertificate entry required!" .Values.auth.tls.caCertificate | b64enc | quote }}
+  tls.crt: {{ required "A valid .Values.auth.tls.serverCertificate entry required!" .Values.auth.tls.serverCertificate | b64enc | quote }}
+  tls.key: {{ required "A valid .Values.auth.tls.serverKey entry required!" .Values.auth.tls.serverKey | b64enc | quote }}
+  {{- else }}
+  {{- $ca := genCA "rabbitmq-internal-ca" 365 }}
+  {{- $fullname := include "rabbitmq.fullname" . }}
+  {{- $releaseNamespace := .Release.Namespace }}
+  {{- $clusterDomain := .Values.clusterDomain }}
+  {{- $serviceName := include "rabbitmq.fullname" . 
}} + {{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} + {{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/rabbitmq/values.schema.json b/ansible/roles/helm_install/files/rabbitmq/values.schema.json new file mode 100644 index 0000000..8ef33ef --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/values.schema.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "auth": { + "type": "object", + "properties": { + "username": { + "type": "string", + "title": "RabbitMQ user", + "form": true + }, + "password": { + "type": "string", + "title": "RabbitMQ password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set" + } + } + }, + "extraConfiguration": { + "type": "string", + "title": "Extra RabbitMQ Configuration", + "form": true, + "render": "textArea", + "description": "Extra configuration to be appended to RabbitMQ Configuration" + }, + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of replicas", + "description": "Number of replicas to deploy" + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "persistence/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Prometheus metrics for RabbitMQ", + "description": "Install Prometheus plugin in the RabbitMQ container", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/ansible/roles/helm_install/files/rabbitmq/values.yaml b/ansible/roles/helm_install/files/rabbitmq/values.yaml new file mode 100644 index 0000000..8144a1b --- /dev/null +++ b/ansible/roles/helm_install/files/rabbitmq/values.yaml @@ -0,0 +1,1215 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets 
Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section RabbitMQ Image parameters +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ +## @param image.registry RabbitMQ image registry +## @param image.repository RabbitMQ image repository +## @param image.tag RabbitMQ image tag (immutable tags are recommended) +## @param image.pullPolicy RabbitMQ image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Set to true if you would like to see extra information on logs +## +image: + registry: docker.io + repository: bitnami/rabbitmq + tag: 3.9.15-debian-10-r0 + + ## set to true if you would like to see extra information on logs + ## It turns BASH and/or NAMI debugging in the image + ## + debug: false + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + +## @section Common parameters +## + +## @param nameOverride String to partially override rabbitmq.fullname template (will maintain the release name) +## +nameOverride: "" + +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: "" + +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" + +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @param hostAliases Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## RabbitMQ Authentication parameters +## +auth: + ## @param auth.username RabbitMQ application username + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + username: user + + ## @param auth.password RabbitMQ application password + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + password: "" + ## @param auth.existingPasswordSecret Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) + ## e.g: + ## existingPasswordSecret: name-of-existing-secret + ## + existingPasswordSecret: "" + + ## @param auth.erlangCookie Erlang cookie to determine whether different nodes are allowed to communicate 
with each other + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + erlangCookie: "" + ## @param auth.existingErlangSecret Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) + ## e.g: + ## existingErlangSecret: name-of-existing-secret + ## + existingErlangSecret: "" + + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## @param auth.tls.enabled Enable TLS support on RabbitMQ + ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates + ## @param auth.tls.failIfNoPeerCert When set to true, TLS connection will be rejected if client fails to provide a certificate + ## @param auth.tls.sslOptionsVerify Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? + ## @param auth.tls.caCertificate Certificate Authority (CA) bundle content + ## @param auth.tls.serverCertificate Server certificate content + ## @param auth.tls.serverKey Server private key content + ## @param auth.tls.existingSecret Existing secret with certificate content to RabbitMQ credentials + ## @param auth.tls.existingSecretFullChain Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. + ## + tls: + enabled: false + autoGenerated: false + failIfNoPeerCert: true + sslOptionsVerify: verify_peer + caCertificate: |- + serverCertificate: |- + serverKey: |- + existingSecret: "" + existingSecretFullChain: false + +## @param logs Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable +## ref: https://www.rabbitmq.com/logging.html#log-file-location +## +logs: "-" + +## @param ulimitNofiles RabbitMQ Max File Descriptors +## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables +## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits +## +ulimitNofiles: "65536" + +## RabbitMQ maximum available scheduler threads and online scheduler threads. By default it will create a thread per CPU detected, with the following parameters you can tune it manually. +## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads +## ref: https://github.com/bitnami/charts/issues/2189 +## @param maxAvailableSchedulers RabbitMQ maximum available scheduler threads +## @param onlineSchedulers RabbitMQ online scheduler threads +## +maxAvailableSchedulers: "" +onlineSchedulers: "" + +## The memory threshold under which RabbitMQ will stop reading from client network sockets, in order to avoid being killed by the OS +## ref: https://www.rabbitmq.com/alarms.html +## ref: https://www.rabbitmq.com/memory.html#threshold +## +memoryHighWatermark: + ## @param memoryHighWatermark.enabled Enable configuring Memory high watermark on RabbitMQ + ## + enabled: false + ## @param memoryHighWatermark.type Memory high watermark type. Either `absolute` or `relative` + ## + type: "relative" + ## Memory high watermark value. 
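+  ## A worked example (hypothetical numbers): with the default type "relative" and
+  ## value 0.4, and resources.limits.memory set to 2Gi, the watermark resolves to
+  ## roughly 0.8Gi, since the rendered configuration overrides the detected total
+  ## memory with the container memory limit. With type "absolute" you would instead
+  ## set e.g. value: "512MB".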
+ ## @param memoryHighWatermark.value Memory high watermark value + ## The default value of 0.4 stands for 40% of available RAM + ## Note: the memory relative limit is applied to the resource.limits.memory to calculate the memory threshold + ## You can also use an absolute value, e.g.: 256MB + ## + value: 0.4 + +## @param plugins List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) +## +plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s" + +## @param communityPlugins List of Community plugins (URLs) to be downloaded during container initialization +## Combine it with extraPlugins to also enable them. +## +communityPlugins: "" + +## @param extraPlugins Extra plugins to enable (single string containing a space-separated list) +## Use this instead of `plugins` to add new plugins +## +extraPlugins: "rabbitmq_auth_backend_ldap" + +## Clustering settings +## +clustering: + ## @param clustering.enabled Enable RabbitMQ clustering + ## + enabled: true + ## @param clustering.addressType Switch clustering mode. Either `ip` or `hostname` + ## + addressType: hostname + ## @param clustering.rebalance Rebalance master for queues in cluster when new replica is created + ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + ## + rebalance: false + + ## @param clustering.forceBoot Force boot of an unexpectedly shut down cluster (in an unexpected order). + ## forceBoot executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an unknown order + ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot + ## + forceBoot: false + + ## @param clustering.partitionHandling Switch Partition Handling Strategy. Either `autoheal` or `pause-minority` or `pause-if-all-down` or `ignore` + ## ref: https://www.rabbitmq.com/partitions.html#automatic-handling + ## + partitionHandling: autoheal + +## Loading a RabbitMQ definitions file to configure RabbitMQ +## +loadDefinition: + ## @param loadDefinition.enabled Enable loading a RabbitMQ definitions file to configure RabbitMQ + ## + enabled: false + ## @param loadDefinition.file Name of the definitions file + ## + file: "/app/load_definition.json" + ## @param loadDefinition.existingSecret Existing secret with the load definitions file + ## Can be templated if needed, e.g: + ## existingSecret: "{{ .Release.Name }}-load-definition" + ## + existingSecret: "" + +## @param command Override default container command (useful when using custom images) +## +command: [] +## @param args Override default container args (useful when using custom images) +## +args: [] + +## @param terminationGracePeriodSeconds Default duration in seconds k8s waits for container to exit before sending kill signal. +## Any time in excess of 10 seconds will be spent waiting for any synchronization necessary for cluster not to lose data. 
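+## Note (sketch, not upstream documentation): the statefulset also passes this value to
+## the container's preStop hook (nodeshutdown.sh -t <seconds>), so a larger value, e.g.
+## terminationGracePeriodSeconds: 300 (illustrative), gives queue synchronization more
+## time to finish before the pod is killed.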
+## +terminationGracePeriodSeconds: 120 + +## @param extraEnvVars Extra environment variables to add to RabbitMQ pods +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] + +## @param extraEnvVarsCM Name of existing ConfigMap containing extra environment variables +## +extraEnvVarsCM: "" + +## @param extraEnvVarsSecret Name of existing Secret containing extra environment variables (in case of sensitive data) +## +extraEnvVarsSecret: "" + +## @param extraContainerPorts Extra ports to be included in container spec, primarily informational +## E.g: +## extraContainerPorts: +## - name: new_port_name +## containerPort: 1234 +## +extraContainerPorts: [] + +## @param configuration [string] RabbitMQ Configuration file content: required cluster configuration +## Do not override unless you know what you are doing. +## To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead +## +configuration: |- + ## Username and password + ## + default_user = {{ .Values.auth.username }} + default_pass = CHANGEME + {{- if .Values.clustering.enabled }} + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.{{ .Values.clusterDomain }} + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = {{ .Values.clustering.partitionHandling }} + {{- end }} + {{- if .Values.loadDefinition.enabled }} + load_definitions = {{ .Values.loadDefinition.file }} + {{- end }} + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + {{ tpl .Values.extraConfiguration . }} + {{- if .Values.auth.tls.enabled }} + ssl_options.verify = {{ .Values.auth.tls.sslOptionsVerify }} + listeners.ssl.default = {{ .Values.service.tlsPort }} + ssl_options.fail_if_no_peer_cert = {{ .Values.auth.tls.failIfNoPeerCert }} + ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem + ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem + ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem + {{- end }} + {{- if .Values.ldap.enabled }} + auth_backends.1 = rabbit_auth_backend_ldap + auth_backends.2 = internal + {{- range $index, $server := .Values.ldap.servers }} + auth_ldap.servers.{{ add $index 1 }} = {{ $server }} + {{- end }} + auth_ldap.port = {{ .Values.ldap.port }} + auth_ldap.user_dn_pattern = {{ .Values.ldap.user_dn_pattern }} + {{- if .Values.ldap.tls.enabled }} + auth_ldap.use_ssl = true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + ## Prometheus metrics + ## + prometheus.tcp.port = 9419 + {{- end }} + {{- if .Values.memoryHighWatermark.enabled }} + ## Memory Threshold + ## + total_memory_available_override_value = {{ include "rabbitmq.toBytes" .Values.resources.limits.memory }} + vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ .Values.memoryHighWatermark.value }} + {{- end }} + +## @param extraConfiguration [string] Configuration file content: extra configuration to be appended to RabbitMQ configuration +## Use this instead of `configuration` to add more configuration +## +extraConfiguration: |- + #default_vhost = {{ .Release.Namespace }}-vhost + #disk_free_limit.absolute = 50MB + +## @param advancedConfiguration Configuration file content: advanced configuration +## Use this as additional configuration in classic config format (Erlang term configuration format) +## +## If you set LDAP 
with TLS/SSL enabled and you are using self-signed certificates, uncomment these lines. +## advancedConfiguration: |- +## [{ +## rabbitmq_auth_backend_ldap, +## [{ +## ssl_options, +## [{ +## verify, verify_none +## }, { +## fail_if_no_peer_cert, +## false +## }] +## ]} +## }]. +## +advancedConfiguration: |- + +## LDAP configuration +## +ldap: + ## @param ldap.enabled Enable LDAP support + ## + enabled: false + ## @param ldap.servers List of LDAP servers hostnames + ## + servers: [] + ## @param ldap.port LDAP servers port + ## + port: "389" + ## Pattern used to translate the provided username into a value to be used for the LDAP bind + ## @param ldap.user_dn_pattern Pattern used to translate the provided username into a value to be used for the LDAP bind + ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns + ## + user_dn_pattern: cn=${username},dc=example,dc=org + tls: + ## @param ldap.tls.enabled If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter + ## + enabled: false + +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts +## Examples: +## extraVolumeMounts: +## - name: extras +## mountPath: /usr/share/extras +## readOnly: true +## +extraVolumeMounts: [] +## @param extraVolumes Optionally specify extra list of additional volumes . +## Example: +## extraVolumes: +## - name: extras +## emptyDir: {} +## +extraVolumes: [] + +## @param extraSecrets Optionally specify extra secrets to be created by the chart. +## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. +## Example: +## extraSecrets: +## load-definition: +## load_definition.json: | +## { +## ... +## } +## +extraSecrets: {} +## @param extraSecretsPrependReleaseName Set this flag to true if extraSecrets should be created with prepended. +## +extraSecretsPrependReleaseName: false + +## @section Statefulset parameters +## + +## @param replicaCount Number of RabbitMQ replicas to deploy +## +replicaCount: 1 + +## @param schedulerName Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" + +## RabbitMQ should be initialized one by one when building cluster for the first time. +## Therefore, the default value of podManagementPolicy is 'OrderedReady' +## Once the RabbitMQ participates in the cluster, it waits for a response from another +## RabbitMQ in the same cluster at reboot, except the last RabbitMQ of the same cluster. +## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset always will be last of the cluster. +## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. +## ref : https://www.rabbitmq.com/clustering.html#restarting +## @param podManagementPolicy Pod management policy +## +podManagementPolicy: OrderedReady + +## @param podLabels RabbitMQ Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## @param podAnnotations RabbitMQ Pod annotations. 
Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## @param updateStrategyType Update strategy type for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategyType: RollingUpdate + +## @param statefulsetLabels RabbitMQ statefulset labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +statefulsetLabels: {} + +## @param priorityClassName Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" + +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft + +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + +## @param affinity Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} + +## @param nodeSelector Node labels for pod assignment. Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## @param tolerations Tolerations for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: [] + +## RabbitMQ pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable RabbitMQ pods' Security Context +## @param podSecurityContext.fsGroup Group ID for the filesystem used by the containers +## @param podSecurityContext.runAsUser User ID for the service user running the pod +## +podSecurityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## @param containerSecurityContext RabbitMQ containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## RabbitMQ containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for RabbitMQ containers +## @param resources.requests The requested resources for RabbitMQ containers +## +resources: + ## Example: + ## limits: + ## cpu: 1000m + ## memory: 2Gi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 1000m + ## memory: 2Gi + ## + requests: {} + +## Configure RabbitMQ containers' extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 +## Configure RabbitMQ containers' extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + +## @param customLivenessProbe Override default liveness probe +## +customLivenessProbe: {} + +## @param customReadinessProbe Override default readiness 
probe +## +customReadinessProbe: {} + +## @param customStartupProbe Define a custom startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes +## +customStartupProbe: {} + +## @param initContainers Add init containers to the RabbitMQ pod +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] + +## @param sidecars Add sidecar containers to the RabbitMQ pod +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] + +## Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: false + ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + maxUnavailable: "" + +## @section RBAC parameters +## + +## RabbitMQ pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for RabbitMQ pods + ## + create: true + ## @param serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the rabbitmq.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Auto-mount the service account token in the pod + ## + automountServiceAccountToken: true + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether RBAC rules should be created + ## binding RabbitMQ ServiceAccount to a role + ## that allows RabbitMQ pods querying the K8s API + ## + create: true + +## @section Persistence parameters +## + +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.accessMode PVC Access Mode for RabbitMQ data volume + ## + accessMode: ReadWriteOnce + + ## @param persistence.existingClaim Provide an existing PersistentVolumeClaim + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + ## + existingClaim: "" + ## @param persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom RabbitMQ images + ## + mountPath: /bitnami/rabbitmq/mnesia + ## @param persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and when one PV is used for multiple services + ## + subPath: "" + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 8Gi + + ## @param persistence.volumes Additional volumes without creating PVC + ## - name: volume_name + ## emptyDir: {} + ## + volumes: [] + ## @param persistence.annotations Persistence annotations. Evaluated as a template + ## Example: + ## annotations: + ## example.io/disk-volume-type: SSD + ## + annotations: {} + +## @section Exposure parameters +## + +## Kubernetes service type +## +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + + ## @param service.portEnabled Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. + ## + portEnabled: true + + ## @param service.port Amqp port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + port: 5672 + + ## @param service.portName Amqp service port name + ## + portName: amqp + + ## @param service.tlsPort Amqp TLS port + ## + tlsPort: 5671 + + ## @param service.tlsPortName Amqp TLS service port name + ## + tlsPortName: amqp-ssl + + ## @param service.nodePort Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## e.g: + ## nodePort: 30672 + ## + nodePort: "" + + ## @param service.tlsNodePort Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` + ## e.g: + ## tlsNodePort: 30671 + ## + tlsNodePort: "" + + ## @param service.distPortEnabled Erlang distribution server port + ## + distPortEnabled: true + + ## @param service.distPort Erlang distribution server port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + distPort: 25672 + + ## @param service.distPortName Erlang distribution service port name + ## + distPortName: dist + + ## @param service.distNodePort Node port override for `dist` port, if serviceType is `NodePort` + ## e.g: + ## distNodePort: 30676 + ## + distNodePort: "" + + ## @param service.managerPortEnabled RabbitMQ Manager port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + managerPortEnabled: true + + ## @param service.managerPort RabbitMQ Manager port + ## + managerPort: 15672 + + ## @param service.managerPortName RabbitMQ Manager service port name + ## + managerPortName: http-stats + + ## @param service.managerNodePort Node port override for `http-stats` port, if serviceType is `NodePort` + ## e.g: + ## managerNodePort: 30673 + ## +
managerNodePort: "" + + ## @param service.metricsPort RabbitMQ Prometheues metrics port + ## + metricsPort: 9419 + + ## @param service.metricsPortName RabbitMQ Prometheues metrics service port name + ## + metricsPortName: metrics + + ## @param service.metricsNodePort Node port override for `metrics` port, if serviceType is `NodePort` + ## e.g: + ## metricsNodePort: 30674 + ## + metricsNodePort: "" + + ## @param service.epmdPortEnabled RabbitMQ EPMD Discovery service port + ## + epmdPortEnabled: true + + ## @param service.epmdNodePort Node port override for `epmd` port, if serviceType is `NodePort` + ## e.g: + ## epmdNodePort: 30675 + ## + epmdNodePort: "" + + ## @param service.epmdPortName EPMD Discovery service port name + ## + epmdPortName: epmd + + ## @param service.extraPorts Extra ports to expose in the service + ## E.g.: + ## extraPorts: + ## - name: new_svc_name + ## port: 1234 + ## targetPort: 1234 + ## + extraPorts: [] + + ## @param service.loadBalancerSourceRanges Address(es) that are allowed when service is `LoadBalancer` + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + + ## @param service.externalIPs Set the ExternalIPs + ## + externalIPs: [] + + ## @param service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## @param service.loadBalancerIP Set the LoadBalancerIP + ## + loadBalancerIP: "" + + ## @param service.labels Service labels. Evaluated as a template + ## + labels: {} + + ## @param service.annotations Service annotations. Evaluated as a template + ## Example: + ## annotations: + ## service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + ## + annotations: {} + ## @param service.annotationsHeadless Headless Service annotations. Evaluated as a template + ## Example: + ## annotations: + ## external-dns.alpha.kubernetes.io/internal-hostname: rabbitmq.example.com + ## + annotationsHeadless: {} + +## Configure the ingress resource that allows you to access the +## RabbitMQ installation. Set up the URL +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress resource for Management console + ## + enabled: false + + ## @param ingress.path Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. + ## + path: / + + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + + ## @param ingress.hostname Default host for the ingress resource + ## + hostname: rabbitmq.local + + ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
+ ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + + ## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }} + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + + ## DEPRECATED: Use ingress.annotations instead of ingress.certManager + ## certManager: false + ## + + ## @param ingress.selfSigned Set this to true in order to create a TLS secret for this ingress record + ## using self-signed certificates generated by Helm + ## + selfSigned: false + + ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record. + ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + ## e.g: + ## extraHosts: + ## - name: rabbitmq.local + ## path: / + ## + extraHosts: [] + + ## @param ingress.extraRules The list of additional rules to be added to this ingress record. Evaluated as a template + ## Useful when looking for additional customization, such as using a different backend + ## + extraRules: [] + + ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record. + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - rabbitmq.local + ## secretName: rabbitmq.local-tls + ## + extraTls: [] + + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: rabbitmq.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + + ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
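+  ## e.g. (a sketch; assumes an IngressClass named "nginx" exists in the cluster):
+  ## ingressClassName: nginx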
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports RabbitMQ is listening + ## on. When true, RabbitMQ will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.additionalRules Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. + ## e.g: + ## additionalRules: + ## - matchLabels: + ## - role: frontend + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + additionalRules: [] + +## @section Metrics Parameters +## + +## Prometheus Metrics +## +metrics: + ## @param metrics.enabled Enable exposing RabbitMQ metrics to be gathered by Prometheus + ## + enabled: false + + ## @param metrics.plugins Plugins to enable Prometheus metrics in RabbitMQ + ## + plugins: "rabbitmq_prometheus" + ## Prometheus pod annotations + ## @param metrics.podAnnotations [object] Annotations for enabling prometheus to access the metrics endpoint + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.metricsPort }}" + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings MetricsRelabelConfigs to apply to samples before ingestion. DEPRECATED: Will be removed in next major. + ## + relabellings: [] + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping. + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricsRelabelConfigs to apply to samples before ingestion. 
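+    ## e.g. (a sketch; a hypothetical relabeling that drops one metric before ingestion):
+    ## metricRelabelings:
+    ##   - sourceLabels: [__name__]
+    ##     regex: rabbitmq_build_info
+    ##     action: drop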
+ ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Used to pass Labels that are required by the installed Prometheus Operator + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.targetLabels Used to keep given service's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + targetLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Used to keep given pod's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + podTargetLabels: {} + ## @param metrics.serviceMonitor.path Define the path used by ServiceMonitor to scrape metrics + ## Could be /metrics for aggregated metrics or /metrics/per-object for more details + ## + path: "" + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Set this to true to create prometheusRules for Prometheus operator + ## + enabled: false + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRules will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.namespace namespace where prometheusRules resource should be created + ## + namespace: "" + ## List of rules, used as template by Helm. + ## @param metrics.prometheusRule.rules List of rules, used as template by Helm. + ## These are just example rules, inspired by https://awesome-prometheus-alerts.grep.to/rules.html + ## rules: + ## - alert: RabbitmqDown + ## expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + ## description: RabbitMQ node down + ## - alert: ClusterDown + ## expr: | + ## sum(rabbitmq_running{service="{{ template "rabbitmq.fullname" . }}"}) + ## < {{ .Values.replicaCount }} + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Less than {{ .Values.replicaCount }} nodes running in RabbitMQ cluster + ## VALUE = {{ "{{ $value }}" }} + ## - alert: ClusterPartition + ## expr: rabbitmq_partitions{service="{{ template "rabbitmq.fullname" . }}"} > 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Cluster partition + ## VALUE = {{ "{{ $value }}" }} + ## - alert: OutOfMemory + ## expr: | + ## rabbitmq_node_mem_used{service="{{ template "rabbitmq.fullname" . }}"} + ## / rabbitmq_node_mem_limit{service="{{ template "rabbitmq.fullname" . }}"} + ## * 100 > 90 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Memory available for RabbitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + ## LABELS: {{ "{{ $labels }}" }} + ## - alert: TooManyConnections + ## expr: rabbitmq_connectionsTotal{service="{{ template "rabbitmq.fullname" .
}}"} > 1000 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## RabbitMQ instance has too many connections (> 1000) + ## VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + ## + rules: [] + +## @section Init Container Parameters +## + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r394 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} diff --git a/ansible/roles/helm_install/files/redis/.helmignore b/ansible/roles/helm_install/files/redis/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/roles/helm_install/files/redis/Chart.lock b/ansible/roles/helm_install/files/redis/Chart.lock new file mode 100644 index 0000000..93fb935 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.17.1 +digest: sha256:dacc73770a5640c011e067ff8840ddf89631fc19016c8d0a9e5ea160e7da8690 +generated: "2022-09-21T17:20:26.455768358+09:00" diff --git a/ansible/roles/helm_install/files/redis/Chart.yaml b/ansible/roles/helm_install/files/redis/Chart.yaml new file mode 100644 index 0000000..f07aa1d --- /dev/null +++ b/ansible/roles/helm_install/files/redis/Chart.yaml @@ -0,0 +1,28 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 6.2.6 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Redis(TM) is an open source, advanced key-value store. It is often referred + to as a data structure server since keys can contain strings, hashes, lists, sets + and sorted sets. +home: https://github.com/bitnami/charts/tree/master/bitnami/redis +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +version: 16.6.0 diff --git a/ansible/roles/helm_install/files/redis/README.md b/ansible/roles/helm_install/files/redis/README.md new file mode 100644 index 0000000..a952b20 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/README.md @@ -0,0 +1,866 @@ + + +# Redis(TM) packaged by Bitnami + +Redis(TM) is an open source, advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. + +[Overview of Redis™](http://redis.io) + +Disclaimer: Redis is a registered trademark of Redis Labs Ltd. Any rights therein are reserved to Redis Labs Ltd. Any use by Bitnami is for referential purposes only and does not indicate any sponsorship, endorsement, or affiliation between Redis Labs Ltd. and Bitnami. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/redis +``` + +## Introduction + +This chart bootstraps a [Redis™](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +### Choose between Redis™ Helm Chart and Redis™ Cluster Helm Chart + +You can choose either of the two Redis™ Helm charts for deploying a Redis™ cluster. + +1. [Redis™ Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis) will deploy a master-slave cluster, with the [option](https://github.com/bitnami/charts/tree/master/bitnami/redis#redis-sentinel-configuration-parameters) of enabling Redis™ Sentinel. +2.
[Redis™ Cluster Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis-cluster) will deploy a Redis™ Cluster topology with sharding. + +The main features of each chart are the following: + +| Redis™ | Redis™ Cluster | +|--------------------------------------------------------|------------------------------------------------------------------------| +| Supports multiple databases | Supports only one database. Better if you have a big dataset | +| Single write point (single master) | Multiple write points (multiple masters) | +| ![Redis™ Topology](img/redis-topology.png) | ![Redis™ Cluster Topology](img/redis-cluster-topology.png) | + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/redis +``` + +The command deploys Redis™ on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | -------------------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.redis.password` | Global Redis™ password (overrides `auth.password`) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + + +### Redis™ Image parameters + +| Name | Description | Value | +| ------------------- | ------------------------------------------------------- | ---------------------- | +| `image.registry` | Redis™ image registry | `docker.io` | +| `image.repository` | Redis™ image repository | `bitnami/redis` | +| `image.tag` | Redis™ image tag (immutable tags are recommended) | `6.2.6-debian-10-r146` | +| `image.pullPolicy` | Redis™ image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Redis™ image pull secrets | `[]` | +| `image.debug` | Enable image debug mode | `false` | + + +### Redis™ common configuration parameters + +| Name | 
Description | Value | +| -------------------------------- | --------------------------------------------------------------------------------------- | ------------- | +| `architecture` | Redis™ architecture. Allowed values: `standalone` or `replication` | `replication` | +| `auth.enabled` | Enable password authentication | `true` | +| `auth.sentinel` | Enable password authentication on sentinels too | `true` | +| `auth.password` | Redis™ password | `""` | +| `auth.existingSecret` | The name of an existing secret with Redis™ credentials | `""` | +| `auth.existingSecretPasswordKey` | Password key to be retrieved from existing secret | `""` | +| `auth.usePasswordFiles` | Mount credentials as files instead of using an environment variable | `false` | +| `commonConfiguration` | Common configuration to be added into the ConfigMap | `""` | +| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for Redis™ nodes | `""` | + + +### Redis™ master configuration parameters + +| Name | Description | Value | +| ------------------------------------------- | ------------------------------------------------------------------------------------------------- | ------------------------ | +| `master.configuration` | Configuration for Redis™ master nodes | `""` | +| `master.disableCommands` | Array with Redis™ commands to disable on master nodes | `["FLUSHDB","FLUSHALL"]` | +| `master.command` | Override default container command (useful when using custom images) | `[]` | +| `master.args` | Override default container args (useful when using custom images) | `[]` | +| `master.preExecCmds` | Additional commands to run prior to starting Redis™ master | `[]` | +| `master.extraFlags` | Array with additional command line flags for Redis™ master | `[]` | +| `master.extraEnvVars` | Array with extra environment variables to add to Redis™ master nodes | `[]` | +| `master.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Redis™ master nodes | `""` | +| `master.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Redis™ master nodes | `""` | +| `master.containerPorts.redis` | Container port to open on Redis™ master nodes | `6379` | +| `master.startupProbe.enabled` | Enable startupProbe on Redis™ master nodes | `false` | +| `master.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `20` | +| `master.startupProbe.periodSeconds` | Period seconds for startupProbe | `5` | +| `master.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `master.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `master.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `master.livenessProbe.enabled` | Enable livenessProbe on Redis™ master nodes | `true` | +| `master.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` | +| `master.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `master.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `master.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `master.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `master.readinessProbe.enabled` | Enable readinessProbe on Redis™ master nodes | `true` | +| `master.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` | +| `master.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| 
`master.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `master.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `master.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `master.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `master.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `master.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `master.resources.limits` | The resources limits for the Redis™ master containers | `{}` | +| `master.resources.requests` | The requested resources for the Redis™ master containers | `{}` | +| `master.podSecurityContext.enabled` | Enabled Redis™ master pods' Security Context | `true` | +| `master.podSecurityContext.fsGroup` | Set Redis™ master pod's Security Context fsGroup | `1001` | +| `master.containerSecurityContext.enabled` | Enabled Redis™ master containers' Security Context | `true` | +| `master.containerSecurityContext.runAsUser` | Set Redis™ master containers' Security Context runAsUser | `1001` | +| `master.kind` | Use either Deployment or StatefulSet (default) | `StatefulSet` | +| `master.schedulerName` | Alternate scheduler for Redis™ master pods | `""` | +| `master.updateStrategy.type` | Redis™ master statefulset strategy type | `RollingUpdate` | +| `master.priorityClassName` | Redis™ master pods' priorityClassName | `""` | +| `master.hostAliases` | Redis™ master pods host aliases | `[]` | +| `master.podLabels` | Extra labels for Redis™ master pods | `{}` | +| `master.podAnnotations` | Annotations for Redis™ master pods | `{}` | +| `master.shareProcessNamespace` | Share a single process namespace between all of the containers in Redis™ master pods | `false` | +| `master.podAffinityPreset` | Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `master.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.nodeAffinityPreset.key` | Node label key to match. Ignored if `master.affinity` is set | `""` | +| `master.nodeAffinityPreset.values` | Node label values to match. 
Ignored if `master.affinity` is set | `[]` | +| `master.affinity` | Affinity for Redis™ master pods assignment | `{}` | +| `master.nodeSelector` | Node labels for Redis™ master pods assignment | `{}` | +| `master.tolerations` | Tolerations for Redis™ master pods assignment | `[]` | +| `master.topologySpreadConstraints` | Spread Constraints for Redis™ master pod assignment | `[]` | +| `master.lifecycleHooks` | for the Redis™ master container(s) to automate configuration before or after startup | `{}` | +| `master.extraVolumes` | Optionally specify extra list of additional volumes for the Redis™ master pod(s) | `[]` | +| `master.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis™ master container(s) | `[]` | +| `master.sidecars` | Add additional sidecar containers to the Redis™ master pod(s) | `[]` | +| `master.initContainers` | Add additional init containers to the Redis™ master pod(s) | `[]` | +| `master.persistence.enabled` | Enable persistence on Redis™ master nodes using Persistent Volume Claims | `true` | +| `master.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` | +| `master.persistence.sizeLimit` | Set this to enable a size limit for `emptyDir` volumes. | `""` | +| `master.persistence.path` | The path the volume will be mounted at on Redis™ master containers | `/data` | +| `master.persistence.subPath` | The subdirectory of the volume to mount on Redis™ master containers | `""` | +| `master.persistence.storageClass` | Persistent Volume storage class | `""` | +| `master.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `master.persistence.size` | Persistent Volume size | `8Gi` | +| `master.persistence.annotations` | Additional custom annotations for the PVC | `{}` | +| `master.persistence.selector` | Additional labels to match for the PVC | `{}` | +| `master.persistence.dataSource` | Custom PVC data source | `{}` | +| `master.persistence.existingClaim` | Use an existing PVC which must be created manually before binding | `""` | +| `master.service.type` | Redis™ master service type | `ClusterIP` | +| `master.service.ports.redis` | Redis™ master service port | `6379` | +| `master.service.nodePorts.redis` | Node port for Redis™ master | `""` | +| `master.service.externalTrafficPolicy` | Redis™ master service external traffic policy | `Cluster` | +| `master.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `master.service.clusterIP` | Redis™ master service Cluster IP | `""` | +| `master.service.loadBalancerIP` | Redis™ master service Load Balancer IP | `""` | +| `master.service.loadBalancerSourceRanges` | Redis™ master service Load Balancer sources | `[]` | +| `master.service.annotations` | Additional custom annotations for Redis™ master service | `{}` | +| `master.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-master pods | `30` | + + +### Redis™ replicas configuration parameters + +| Name | Description | Value | +| -------------------------------------------- | --------------------------------------------------------------------------------------------------- | ------------------------ | +| `replica.replicaCount` | Number of Redis™ replicas to deploy | `3` | +| `replica.configuration` | Configuration for Redis™ replicas nodes | `""` | +| `replica.disableCommands` | Array with Redis™ commands to disable on replicas nodes | `["FLUSHDB","FLUSHALL"]` | +| `replica.command` | Override default container
command (useful when using custom images) | `[]` | +| `replica.args` | Override default container args (useful when using custom images) | `[]` | +| `replica.preExecCmds` | Additional commands to run prior to starting Redis™ replicas | `[]` | +| `replica.extraFlags` | Array with additional command line flags for Redis™ replicas | `[]` | +| `replica.extraEnvVars` | Array with extra environment variables to add to Redis™ replicas nodes | `[]` | +| `replica.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Redis™ replicas nodes | `""` | +| `replica.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Redis™ replicas nodes | `""` | +| `replica.externalMaster.enabled` | Use external master for bootstrapping | `false` | +| `replica.externalMaster.host` | External master host to bootstrap from | `""` | +| `replica.externalMaster.port` | Port for Redis service external master host | `6379` | +| `replica.containerPorts.redis` | Container port to open on Redis™ replicas nodes | `6379` | +| `replica.startupProbe.enabled` | Enable startupProbe on Redis™ replicas nodes | `true` | +| `replica.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `replica.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `replica.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `replica.startupProbe.failureThreshold` | Failure threshold for startupProbe | `22` | +| `replica.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `replica.livenessProbe.enabled` | Enable livenessProbe on Redis™ replicas nodes | `true` | +| `replica.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` | +| `replica.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `replica.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `replica.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `replica.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `replica.readinessProbe.enabled` | Enable readinessProbe on Redis™ replicas nodes | `true` | +| `replica.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` | +| `replica.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `replica.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `replica.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `replica.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `replica.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `replica.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `replica.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `replica.resources.limits` | The resources limits for the Redis™ replicas containers | `{}` | +| `replica.resources.requests` | The requested resources for the Redis™ replicas containers | `{}` | +| `replica.podSecurityContext.enabled` | Enabled Redis™ replicas pods' Security Context | `true` | +| `replica.podSecurityContext.fsGroup` | Set Redis™ replicas pod's Security Context fsGroup | `1001` | +| `replica.containerSecurityContext.enabled` | Enabled Redis™ replicas containers' Security Context | `true` | +| `replica.containerSecurityContext.runAsUser` | Set Redis™ replicas containers' 
Security Context runAsUser | `1001` | +| `replica.schedulerName` | Alternate scheduler for Redis™ replicas pods | `""` | +| `replica.updateStrategy.type` | Redis™ replicas statefulset strategy type | `RollingUpdate` | +| `replica.priorityClassName` | Redis™ replicas pods' priorityClassName | `""` | +| `replica.podManagementPolicy` | podManagementPolicy to manage scaling operation of Redis™ replicas pods | `""` | +| `replica.hostAliases` | Redis™ replicas pods host aliases | `[]` | +| `replica.podLabels` | Extra labels for Redis™ replicas pods | `{}` | +| `replica.podAnnotations` | Annotations for Redis™ replicas pods | `{}` | +| `replica.shareProcessNamespace` | Share a single process namespace between all of the containers in Redis™ replicas pods | `false` | +| `replica.podAffinityPreset` | Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `replica.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `replica.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `replica.nodeAffinityPreset.key` | Node label key to match. Ignored if `replica.affinity` is set | `""` | +| `replica.nodeAffinityPreset.values` | Node label values to match. Ignored if `replica.affinity` is set | `[]` | +| `replica.affinity` | Affinity for Redis™ replicas pods assignment | `{}` | +| `replica.nodeSelector` | Node labels for Redis™ replicas pods assignment | `{}` | +| `replica.tolerations` | Tolerations for Redis™ replicas pods assignment | `[]` | +| `replica.topologySpreadConstraints` | Spread Constraints for Redis™ replicas pod assignment | `[]` | +| `replica.lifecycleHooks` | for the Redis™ replica container(s) to automate configuration before or after startup | `{}` | +| `replica.extraVolumes` | Optionally specify extra list of additional volumes for the Redis™ replicas pod(s) | `[]` | +| `replica.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis™ replicas container(s) | `[]` | +| `replica.sidecars` | Add additional sidecar containers to the Redis™ replicas pod(s) | `[]` | +| `replica.initContainers` | Add additional init containers to the Redis™ replicas pod(s) | `[]` | +| `replica.persistence.enabled` | Enable persistence on Redis™ replicas nodes using Persistent Volume Claims | `true` | +| `replica.persistence.medium` | Provide a medium for `emptyDir` volumes.
| `""` | +| `replica.persistence.path` | The path the volume will be mounted at on Redis™ replicas containers | `/data` | +| `replica.persistence.subPath` | The subdirectory of the volume to mount on Redis™ replicas containers | `""` | +| `replica.persistence.storageClass` | Persistent Volume storage class | `""` | +| `replica.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `replica.persistence.size` | Persistent Volume size | `8Gi` | +| `replica.persistence.annotations` | Additional custom annotations for the PVC | `{}` | +| `replica.persistence.selector` | Additional labels to match for the PVC | `{}` | +| `replica.persistence.dataSource` | Custom PVC data source | `{}` | +| `replica.service.type` | Redis™ replicas service type | `ClusterIP` | +| `replica.service.ports.redis` | Redis™ replicas service port | `6379` | +| `replica.service.nodePorts.redis` | Node port for Redis™ replicas | `""` | +| `replica.service.externalTrafficPolicy` | Redis™ replicas service external traffic policy | `Cluster` | +| `replica.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `replica.service.clusterIP` | Redis™ replicas service Cluster IP | `""` | +| `replica.service.loadBalancerIP` | Redis™ replicas service Load Balancer IP | `""` | +| `replica.service.loadBalancerSourceRanges` | Redis™ replicas service Load Balancer sources | `[]` | +| `replica.service.annotations` | Additional custom annotations for Redis™ replicas service | `{}` | +| `replica.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-replicas pods | `30` | +| `replica.autoscaling.enabled` | Enable replica autoscaling settings | `false` | +| `replica.autoscaling.minReplicas` | Minimum replicas for the pod autoscaling | `1` | +| `replica.autoscaling.maxReplicas` | Maximum replicas for the pod autoscaling | `11` | +| `replica.autoscaling.targetCPU` | Percentage of CPU to consider when autoscaling | `""` | +| `replica.autoscaling.targetMemory` | Percentage of Memory to consider when autoscaling | `""` | + + +### Redis™ Sentinel configuration parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `sentinel.enabled` | Use Redis™ Sentinel on Redis™ pods. | `false` | +| `sentinel.image.registry` | Redis™ Sentinel image registry | `docker.io` | +| `sentinel.image.repository` | Redis™ Sentinel image repository | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | Redis™ Sentinel image tag (immutable tags are recommended) | `6.2.6-debian-10-r144` | +| `sentinel.image.pullPolicy` | Redis™ Sentinel image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Redis™ Sentinel image pull secrets | `[]` | +| `sentinel.image.debug` | Enable image debug mode | `false` | +| `sentinel.masterSet` | Master set name | `mymaster` | +| `sentinel.quorum` | Sentinel Quorum | `2` | +| `sentinel.getMasterTimeout` | Amount of time to allow before get_sentinel_master_info() times out. | `220` | +| `sentinel.automateClusterRecovery` | Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it. 
| `false` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis™ node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing an election failover | `18000` | +| `sentinel.parallelSyncs` | Number of replicas that can be reconfigured in parallel to use the new master after a failover | `1` | +| `sentinel.configuration` | Configuration for Redis™ Sentinel nodes | `""` | +| `sentinel.command` | Override default container command (useful when using custom images) | `[]` | +| `sentinel.args` | Override default container args (useful when using custom images) | `[]` | +| `sentinel.preExecCmds` | Additional commands to run prior to starting Redis™ Sentinel | `[]` | +| `sentinel.extraEnvVars` | Array with extra environment variables to add to Redis™ Sentinel nodes | `[]` | +| `sentinel.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Redis™ Sentinel nodes | `""` | +| `sentinel.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Redis™ Sentinel nodes | `""` | +| `sentinel.externalMaster.enabled` | Use external master for bootstrapping | `false` | +| `sentinel.externalMaster.host` | External master host to bootstrap from | `""` | +| `sentinel.externalMaster.port` | Port for Redis service external master host | `6379` | +| `sentinel.containerPorts.sentinel` | Container port to open on Redis™ Sentinel nodes | `26379` | +| `sentinel.startupProbe.enabled` | Enable startupProbe on Redis™ Sentinel nodes | `true` | +| `sentinel.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `sentinel.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `sentinel.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `sentinel.startupProbe.failureThreshold` | Failure threshold for startupProbe | `22` | +| `sentinel.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `sentinel.livenessProbe.enabled` | Enable livenessProbe on Redis™ Sentinel nodes | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` | +| `sentinel.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `sentinel.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `sentinel.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `sentinel.readinessProbe.enabled` | Enable readinessProbe on Redis™ Sentinel nodes | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` | +| `sentinel.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `sentinel.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `sentinel.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `sentinel.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `sentinel.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `sentinel.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `sentinel.persistence.enabled` | Enable persistence on Redis™ sentinel nodes using Persistent Volume Claims (Experimental) | `false` | +| `sentinel.persistence.storageClass` | Persistent Volume storage
class | `""` | +| `sentinel.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `sentinel.persistence.size` | Persistent Volume size | `100Mi` | +| `sentinel.persistence.annotations` | Additional custom annotations for the PVC | `{}` | +| `sentinel.persistence.selector` | Additional labels to match for the PVC | `{}` | +| `sentinel.persistence.dataSource` | Custom PVC data source | `{}` | +| `sentinel.resources.limits` | The resources limits for the Redis™ Sentinel containers | `{}` | +| `sentinel.resources.requests` | The requested resources for the Redis™ Sentinel containers | `{}` | +| `sentinel.containerSecurityContext.enabled` | Enabled Redis™ Sentinel containers' Security Context | `true` | +| `sentinel.containerSecurityContext.runAsUser` | Set Redis™ Sentinel containers' Security Context runAsUser | `1001` | +| `sentinel.lifecycleHooks` | for the Redis™ sentinel container(s) to automate configuration before or after startup | `{}` | +| `sentinel.extraVolumes` | Optionally specify extra list of additional volumes for the Redis™ Sentinel | `[]` | +| `sentinel.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis™ Sentinel container(s) | `[]` | +| `sentinel.service.type` | Redis™ Sentinel service type | `ClusterIP` | +| `sentinel.service.ports.redis` | Redis™ service port for Redis™ | `6379` | +| `sentinel.service.ports.sentinel` | Redis™ service port for Redis™ Sentinel | `26379` | +| `sentinel.service.nodePorts.redis` | Node port for Redis™ | `""` | +| `sentinel.service.nodePorts.sentinel` | Node port for Sentinel | `""` | +| `sentinel.service.externalTrafficPolicy` | Redis™ Sentinel service external traffic policy | `Cluster` | +| `sentinel.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `sentinel.service.clusterIP` | Redis™ Sentinel service Cluster IP | `""` | +| `sentinel.service.loadBalancerIP` | Redis™ Sentinel service Load Balancer IP | `""` | +| `sentinel.service.loadBalancerSourceRanges` | Redis™ Sentinel service Load Balancer sources | `[]` | +| `sentinel.service.annotations` | Additional custom annotations for Redis™ Sentinel service | `{}` | +| `sentinel.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-node pods | `30` | + + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `networkPolicy.extraEgress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | +| `podSecurityPolicy.create` | Whether to create a PodSecurityPolicy. 
WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` | +| `podSecurityPolicy.enabled` | Enable PodSecurityPolicy's RBAC rules | `false` | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.rules` | Custom RBAC rules to set | `[]` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `pdb.create` | Specifies whether a PodDisruptionBudget should be created | `false` | +| `pdb.minAvailable` | Min number of pods that must still be available after the eviction | `1` | +| `pdb.maxUnavailable` | Max number of pods that can be unavailable after the eviction | `""` | +| `tls.enabled` | Enable TLS traffic | `false` | +| `tls.authClients` | Require clients to authenticate | `true` | +| `tls.autoGenerated` | Enable autogenerated certificates | `false` | +| `tls.existingSecret` | The name of the existing secret that contains the TLS certificates | `""` | +| `tls.certificatesSecret` | DEPRECATED. Use existingSecret instead. | `""` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate Key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename | `""` | +| `tls.dhParamsFilename` | File containing DH params (in order to support DH based ciphers) | `""` | + + +### Metrics Parameters + +| Name | Description | Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------ | ------------------------ | +| `metrics.enabled` | Start a sidecar prometheus exporter to expose Redis™ metrics | `false` | +| `metrics.image.registry` | Redis™ Exporter image registry | `docker.io` | +| `metrics.image.repository` | Redis™ Exporter image repository | `bitnami/redis-exporter` | +| `metrics.image.tag` | Redis™ Exporter image tag (immutable tags are recommended) | `1.35.1-debian-10-r16` | +| `metrics.image.pullPolicy` | Redis™ Exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Redis™ Exporter image pull secrets | `[]` | +| `metrics.command` | Override default metrics container init command (useful when using custom images) | `[]` | +| `metrics.redisTargetHost` | A way to specify an alternative Redis™ hostname | `localhost` | +| `metrics.extraArgs` | Extra arguments for Redis™ exporter, for example: | `{}` | +| `metrics.extraEnvVars` | Array with extra environment variables to add to Redis™ exporter | `[]` | +| `metrics.containerSecurityContext.enabled` | Enabled Redis™ exporter containers' Security Context | `true` | +| `metrics.containerSecurityContext.runAsUser` | Set Redis™ exporter containers' Security Context runAsUser | `1001` | +| `metrics.extraVolumes` | Optionally specify extra list of additional volumes for the Redis™ metrics sidecar | `[]` | +| `metrics.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis™ metrics sidecar | `[]` | +| `metrics.resources.limits` | The resources limits for the Redis™ exporter container | `{}` | +| `metrics.resources.requests` | The requested resources for the Redis™ exporter container | `{}` | +| `metrics.podLabels` | Extra labels for Redis™ exporter pods
| `{}` | +| `metrics.podAnnotations` | Annotations for Redis™ exporter pods | `{}` | +| `metrics.service.type` | Redis™ exporter service type | `ClusterIP` | +| `metrics.service.port` | Redis™ exporter service port | `9121` | +| `metrics.service.externalTrafficPolicy` | Redis™ exporter service external traffic policy | `Cluster` | +| `metrics.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `metrics.service.loadBalancerIP` | Redis™ exporter service Load Balancer IP | `""` | +| `metrics.service.loadBalancerSourceRanges` | Redis™ exporter service Load Balancer sources | `[]` | +| `metrics.service.annotations` | Additional custom annotations for Redis™ exporter service | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | The namespace in which the ServiceMonitor will be created | `""` | +| `metrics.serviceMonitor.interval` | The interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | The timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabellings` | Metrics RelabelConfigs to apply to samples before scraping. | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | Metrics RelabelConfigs to apply to samples before ingestion. | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.enabled` | Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.prometheusRule.namespace` | The namespace in which the prometheusRule will be created | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels for the prometheusRule | `{}` | +| `metrics.prometheusRule.rules` | Custom Prometheus rules | `[]` | + + +### Init Container Parameters + +| Name | Description | Value | +| ------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` | +| `volumePermissions.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `10-debian-10-r355` | +| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` | +| `volumePermissions.resources.limits` | The resources limits for the init container | `{}` | +| `volumePermissions.resources.requests` | The requested resources for the init container | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | Set init container's Security Context runAsUser | `0` | +| `sysctl.enabled` | Enable init container to modify Kernel settings | `false` | +| `sysctl.image.registry` | Bitnami Shell image registry | `docker.io` | +| `sysctl.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` | +| `sysctl.image.tag` | Bitnami Shell image tag 
(immutable tags are recommended) | `10-debian-10-r355` |
+| `sysctl.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
+| `sysctl.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
+| `sysctl.command` | Override default init-sysctl container command (useful when using custom images) | `[]` |
+| `sysctl.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` |
+| `sysctl.resources.limits` | The resources limits for the init container | `{}` |
+| `sysctl.resources.requests` | The requested resources for the init container | `{}` |
+
+
+### useExternalDNS Parameters
+
+| Name | Description | Value |
+| ------------------------------ | ------------------------------------------------ | ----------------------------------- |
+| `useExternalDNS.enabled` | Enable the annotations and syntax required for `external-dns` to work. Note this requires a working installation of `external-dns` to be usable. | `false` |
+| `useExternalDNS.additionalAnnotations` | Extra annotations to be utilized when `external-dns` is enabled. | `{}` |
+| `useExternalDNS.annotationKey` | The annotation key utilized when `external-dns` is enabled. | `external-dns.alpha.kubernetes.io/` |
+| `useExternalDNS.suffix` | The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release. | `""` |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install my-release \
+  --set auth.password=secretpassword \
+    bitnami/redis
+```
+
+The above command sets the Redis™ server password to `secretpassword`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install my-release -f values.yaml bitnami/redis
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml) as a starting point.
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist.
+
+### Use a different Redis™ version
+
+To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/redis/configuration/change-image-version/).
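+
+For instance, a minimal `values.yaml` sketch pinning a different image version could look like this (the tag below is illustrative only; pick an immutable tag that actually exists in the registry):
+
+```yaml
+image:
+  registry: docker.io
+  repository: bitnami/redis
+  tag: 6.2.6-debian-10-r0
+```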
+
+### Bootstrapping with an External Cluster
+
+This chart is equipped with the ability to bring online a set of Pods that connect to an existing Redis deployment that lies outside of Kubernetes. This effectively creates a hybrid Redis deployment where both Pods in Kubernetes and instances such as virtual machines can partake in a single Redis deployment. This is helpful in situations where one may be migrating Redis from virtual machines into Kubernetes, for example. To take advantage of this, use the following as an example configuration:
+
+```yaml
+replica:
+  externalMaster:
+    enabled: true
+    host: external-redis-0.internal
+sentinel:
+  externalMaster:
+    enabled: true
+    host: external-redis-0.internal
+```
+
+:warning: This is currently limited to clusters in which Sentinel and Redis run on the same node! :warning:
+
+Please also note that the external sentinel must be listening on port `26379`, and this is currently not configurable.
+
+Once the Kubernetes Redis deployment is online and confirmed to be working with the existing cluster, the configuration can then be removed and the cluster will remain connected.
+
+### External DNS
+
+This chart is equipped to allow leveraging the ExternalDNS project. Doing so will enable ExternalDNS to publish the FQDN for each instance, in the format of `..`. For example, when using the following configuration:
+
+```yaml
+useExternalDNS:
+  enabled: true
+  suffix: prod.example.org
+  additionalAnnotations:
+    ttl: 10
+```
+
+On a cluster where the name of the Helm release is `a`, the hostname of a Pod is generated as `a-redis-node-0.a-redis.prod.example.org`. The IP of that FQDN will match that of the associated Pod. This modifies the following parameters of the Redis/Sentinel configuration using this new FQDN:
+
+* `replica-announce-ip`
+* `known-sentinel`
+* `known-replica`
+* `announce-ip`
+
+:warning: This requires a working installation of `external-dns` to be fully functional. :warning:
+
+See the [official ExternalDNS documentation](https://github.com/kubernetes-sigs/external-dns) for additional configuration options.
+
+### Cluster topologies
+
+#### Default: Master-Replicas
+
+When installing the chart with `architecture=replication`, it will deploy a Redis™ master StatefulSet (only one master node allowed) and a Redis™ replicas StatefulSet. The replicas will be read-replicas of the master. Two services will be exposed:
+
+- Redis™ Master service: Points to the master, where read-write operations can be performed.
+- Redis™ Replicas service: Points to the replicas, where only read operations are allowed.
+
+In case the master crashes, the replicas will wait until the master node is respawned again by the Kubernetes Controller Manager.
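+
+As an illustration, assuming a release named `my-release` with password authentication enabled (the service and secret names below follow the chart's usual naming conventions and are assumptions, not literal chart output), the two services could be exercised like this:
+
+```console
+$ export REDIS_PASSWORD=$(kubectl get secret my-release-redis -o jsonpath="{.data.redis-password}" | base64 --decode)
+$ kubectl run redis-client --rm -it --image docker.io/bitnami/redis -- \
+    redis-cli -h my-release-redis-master -a "$REDIS_PASSWORD" SET greeting hello
+$ kubectl run redis-client --rm -it --image docker.io/bitnami/redis -- \
+    redis-cli -h my-release-redis-replicas -a "$REDIS_PASSWORD" GET greeting
+```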
+
+#### Standalone
+
+When installing the chart with `architecture=standalone`, it will deploy a standalone Redis™ StatefulSet (only one node allowed). A single service will be exposed:
+
+- Redis™ Master service: Points to the master, where read-write operations can be performed.
+
+#### Master-Replicas with Sentinel
+
+When installing the chart with `architecture=replication` and `sentinel.enabled=true`, it will deploy a Redis™ master StatefulSet (only one master allowed) and a Redis™ replicas StatefulSet. In this case, the pods will contain an extra container with Redis™ Sentinel. This container will form a cluster of Redis™ Sentinel nodes, which will promote a new master in case the actual one fails. In addition to this, only one service is exposed:
+
+- Redis™ service: Exposes port 6379 for Redis™ read-only operations and port 26379 for accessing Redis™ Sentinel.
+
+For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis™ Sentinel cluster and query the current master using the command below (using redis-cli or similar):
+
+```
+SENTINEL get-master-addr-by-name <master-set-name>
+```
+
+This command will return the address of the current master, which can be accessed from inside the cluster.
+
+In case the current master crashes, the Sentinel containers will elect a new master node.
+
+### Using a password file
+
+To use a password file for Redis™, you need to create a secret containing the password and then deploy the chart using that secret.
+
+Refer to the chart documentation for more information on [using a password file for Redis™](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/use-password-file/).
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`.
+- `tls.existingSecret`: Name of the secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+- `tls.certCAFilename`: CA certificate filename. No defaults.
+
+Refer to the chart documentation for more information on [creating the secret and a TLS deployment example](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/enable-tls/).
+
+### Metrics
+
+The chart can optionally start a metrics exporter for [Prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint.
+
+If you have enabled TLS by specifying `tls.enabled=true`, you also need to pass TLS options to the metrics exporter. You can do that via `metrics.extraArgs`. You can find the metrics exporter CLI flags for TLS [here](https://github.com/oliver006/redis_exporter#command-line-flags). For example, you can either specify `metrics.extraArgs.skip-tls-verification=true` to skip TLS verification, or provide the following values under `metrics.extraArgs` for TLS client authentication:
+
+```console
+tls-client-key-file
+tls-client-cert-file
+tls-ca-cert-file
+```
+
+### Host Kernel Settings
+
+Redis™ may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages.
+
+Refer to the chart documentation for more information on [configuring host kernel settings with an example](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/configure-kernel-settings/).
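+
+As a sketch only (based on the `sysctl.*` parameters documented above; `install_packages` is assumed to be available in the Bitnami Shell image, and the init container needs sufficient privileges to write kernel settings):
+
+```yaml
+sysctl:
+  enabled: true
+  mountHostSys: true
+  command:
+    - /bin/sh
+    - -c
+    - |-
+      # procps provides the sysctl binary; /host-sys is the mounted host /sys
+      install_packages procps
+      sysctl -w net.core.somaxconn=10000
+      echo never > /host-sys/kernel/mm/transparent_hugepage/enabled
+```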
+
+## Persistence
+
+By default, the chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation.
+
+### Existing PersistentVolumeClaim
+
+1. Create the PersistentVolume
+2. Create the PersistentVolumeClaim
+3. Install the chart
+
+```bash
+$ helm install my-release --set master.persistence.existingClaim=PVC_NAME bitnami/redis
+```
+
+## Backup and restore
+
+Refer to the chart documentation for more information on [backing up and restoring Redis™ deployments](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/backup-restore/).
+
+## NetworkPolicy
+
+To enable network policy for Redis™, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+Refer to the chart documentation for more information on [enabling the network policy in Redis™ deployments](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/enable-network-policy/).
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters, as in the sketch below.
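+
+For example, a hedged values sketch that spreads master and replica pods across nodes using the soft anti-affinity preset (parameter names as documented above):
+
+```yaml
+master:
+  podAntiAffinityPreset: soft
+replica:
+  podAntiAffinityPreset: soft
+```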
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
+
+### To 16.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository.
+
+Affected values:
+ - `master.service.port` renamed as `master.service.ports.redis`.
+ - `master.service.nodePort` renamed as `master.service.nodePorts.redis`.
+ - `replica.service.port` renamed as `replica.service.ports.redis`.
+ - `replica.service.nodePort` renamed as `replica.service.nodePorts.redis`.
+ - `sentinel.service.port` renamed as `sentinel.service.ports.redis`.
+ - `sentinel.service.sentinelPort` renamed as `sentinel.service.ports.sentinel`.
+ - `master.containerPort` renamed as `master.containerPorts.redis`.
+ - `replica.containerPort` renamed as `replica.containerPorts.redis`.
+ - `sentinel.containerPort` renamed as `sentinel.containerPorts.sentinel`.
+ - `master.spreadConstraints` renamed as `master.topologySpreadConstraints`.
+ - `replica.spreadConstraints` renamed as `replica.topologySpreadConstraints`.
+
+### To 15.0.0
+
+The parameter to enable the usage of StaticIDs was removed. The behavior is to [always use StaticIDs](https://github.com/bitnami/charts/pull/7278).
+
+### To 14.8.0
+
+The Redis™ sentinel exporter was removed in this version because the upstream project was deprecated. The regular Redis™ exporter is included in the sentinel scenario as usual.
+
+### To 14.0.0
+
+- Several parameters were renamed or removed in favor of new ones in this major version:
+ - The term *slave* has been replaced by the term *replica*. Therefore, parameters prefixed with `slave` are now prefixed with `replicas`.
+ - Credentials parameters are reorganized under the `auth` parameter.
+ - `cluster.enabled` parameter is deprecated in favor of the `architecture` parameter, which accepts two values: `standalone` and `replication`.
+ - `securityContext.*` is deprecated in favor of `XXX.podSecurityContext` and `XXX.containerSecurityContext`.
+ - `sentinel.metrics.*` parameters are deprecated in favor of `metrics.sentinel.*` ones.
+- New parameters to add custom commands, environment variables, sidecars, init containers, etc. were added.
+- Chart labels were adapted to follow the [Helm charts standard labels](https://helm.sh/docs/chart_best_practices/labels/#standard-labels).
+- values.yaml metadata was adapted to follow the format supported by [Readme Generator for Helm](https://github.com/bitnami-labs/readme-generator-for-helm).
+
+Consequences:
+
+Backwards compatibility is not guaranteed. To upgrade to `14.0.0`, install a new release of the Redis™ chart, and migrate the data from your previous release. You have two alternatives to do so:
+
+- Create a backup of the database, and restore it on the new release as explained in the [Backup and restore](#backup-and-restore) section.
+- Reuse the PVC used to hold the master data on your previous release. To do so, use the `master.persistence.existingClaim` parameter. The following example assumes that the release name is `redis`:
+
+```bash
+$ helm install redis bitnami/redis --set auth.password=[PASSWORD] --set master.persistence.existingClaim=[EXISTING_PVC]
+```
+
+| Note: you need to substitute the placeholder _[EXISTING_PVC]_ with the name of the PVC used on your previous release, and _[PASSWORD]_ with the password used in your previous release.
+
+### To 13.0.0
+
+This major version updates the Redis™ docker image version used from `6.0` to `6.2`, the new stable version. There are no major changes in the chart and there shouldn't be any breaking changes in it, as `6.2` is basically a stricter superset of `6.0`. For more information, please refer to [Redis™ 6.2 release notes](https://raw.githubusercontent.com/redis/redis/6.2/00-RELEASENOTES).
+
+### To 12.3.0
+
+This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade, as shown below.
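+
+For instance, from the directory containing your copy of the chart (the path is illustrative):
+
+```console
+$ helm dependency update ./redis
+```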
+
+### To 12.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues.
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore.
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3.
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+### To 11.0.0
+
+When deployed with sentinel enabled, only a group of nodes is deployed and the master/slave role is handled within the group. To avoid breaking compatibility, the settings for these nodes are given through the `slave.xxxx` parameters in `values.yaml`.
+
+Also, when using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then restore the data in this new version.
+
+### To 10.0.0
+
+For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable it to account for the following cases:
+
+- Using a version of redis-sentinel prior to `5.0.1`, where the authentication feature was introduced.
+- Where redis clients need to be updated to support sentinel authentication.
+
+If using a master/slave topology, or with `usePassword: false`, no action is required.
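+
+As an illustration only (a hypothetical invocation, not a command taken from the chart documentation), sentinel authentication could be relaxed like this while keeping Redis™ authentication on:
+
+```console
+$ helm upgrade my-release bitnami/redis \
+    --set usePassword=true \
+    --set sentinel.usePassword=false
+```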
+
+### To 9.0.0
+
+The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis™ exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter).
+
+### To 8.0.18
+
+For releases with `metrics.enabled: true`, the default tag for the exporter image is now `v1.x.x`. This introduces many changes, including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details.
+
+### To 7.0.0
+
+In order to improve performance in case of slave failure, this version adds persistence to the read-only slaves, which means moving from Deployments to StatefulSets (this in itself should not affect upgrades, as the old slave Deployments did not contain any persistence at all). However, it also causes a change in the Redis™ Master StatefulSet definition, so the command `helm upgrade` would not work out of the box. As an alternative, one of the following could be done:
+
+- Recommended: Create a clone of the Redis™ Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC.
+
+  ```
+  helm install my-release bitnami/redis --set persistence.existingClaim=PVC_NAME
+  ```
+
+- Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the Redis™ Master StatefulSet. As a consequence, the following commands can be used to upgrade the release:
+
+  ```
+  helm delete --purge my-release
+  helm install my-release bitnami/redis
+  ```
+
+Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as is done with the masters.
+
+Some values have changed as well:
+
+- `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves).
+- `master.securityContext` and `slave.securityContext` have been changed to `securityContext` (same values for both master and slaves).
+
+This version also allows enabling Redis™ Sentinel containers inside of the Redis™ Pods (feature disabled by default). In case the master crashes, a new Redis™ node will be elected as master. In order to query the current master (no redis master service is exposed), you first need to query the Sentinel cluster. Find more information [in this section](#master-replicas-with-sentinel).
+
+By default, the upgrade will not change the cluster topology. In case you want to use Redis™ Sentinel, you must explicitly set `sentinel.enabled` to `true`.
+
+### To 6.0.0
+
+Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with `--set volumePermissions.enabled=true`.
+
+### To 5.0.0
+
+The default image in this release may be switched out for any image containing the `redis-server`
+and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command`
+must be specified.
+
+#### Breaking changes
+
+- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`.
+- `disableCommands` is now interpreted as an array of strings instead of a string of comma-separated values.
+- `master.persistence.path` now defaults to `/data`.
+
+### To 4.0.0
+
+This version removes the `chart` label from the `spec.selector.matchLabels`,
+which is immutable since `StatefulSet apps/v1beta2`. It had been inadvertently
+added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726.
+
+It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` cannot be upgraded if `spec.selector` is not explicitly set.
+ +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. + +In order to upgrade, delete the Redis™ StatefulSet before upgrading: + +```bash +kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` + +And edit the Redis™ slave (and metrics if enabled) deployment: + +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/ansible/roles/helm_install/files/redis/charts/common-1.17.1.tgz b/ansible/roles/helm_install/files/redis/charts/common-1.17.1.tgz new file mode 100644 index 0000000000000000000000000000000000000000..d2e66eb702c03db2f57932528265520573877492 GIT binary patch literal 14611 zcmV+uIqb$CiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKBhciT9U=zP|%s7t3eb|#`EJ8@<_o4xBenHit%PJHcja%Q{d zwjmOd(53)30LsxMxxf7$yh!k+hb=#{Yko*%5-1c3RfR&KP>9%c%4qLsg18LlXqxLw~UQ>_z_%(7yp(YELF4;(zF0 zyRB;HzL5t}$|RD6QE|`#h>*l%#^(ocOh+6E$+Jji9CuJkK42~gqX#hC?VvQx{~Uya z7vZ22V-azZO8xE-ejr1R_#FO@lPPHSf)V2|<}6DI9fM&!NZ6P}FvRi_V+uneDVh?9 zRCy700NziCFn_-hFW|uT!C>FhhjhS%Yx>Lk2!jz6X7kGZjcq|S& zUE8rv7bZAKG4FIi;%S;7i91~wWi(P0>U3RSJ5xlcM1*3lt5c*p^2&Y$$au0szdELx zZCkL6M_7YfUb853d3hQ7-R{VFiVq+$BiWf%2K=sB_u<*VWA*g;=+o`6mRyq%=crgdAe|c=LhilGlUm-4qw0KUjSH& zIg*Skd)j+#x_RKe0Z_f-h>kIY=5ra6YDYBzU*PwQNnHJ^s$&|v*O!=#C-MLW9rvMI zAIAye@wLWL0l3+5G%ctqZaP|h7!`ywoZv_@{vPwGC3yE+8p{Qvca4F|$4m)(0D+N~ z8yP_}c&1Ne1o(FdRimy=+jISOHLVeP0C6I48LHuzb)cFWC8(C(3uv!3V9H`g|DO6y zC0TzwQWjVAVZ^2>qnOHkEbu4E(T|F0e?UW=hycP5#$d00QMIYWpJV`!Rvsr01=sCI zITCR2QdLD+A|VJMEI#CAYe;xibf0#hj_B1@b3t__)K2gc1=dM>A&t|b$pbyO+tsnq z6-78Lk=Igj2fhF$G*;am^vhyu8JaQ)+jyGFxkt2}YEeizp>hNPO4INn8)8ne#3CfD zmsdJ~??m8JzSf+l->Z7&(FLAAg-2Ry75(djQpM*?NuYsMat=Q!G*x;ga5L z;Bs9s%!?Oftv4iD$`Ur7f3LJ}nLZOH)cORkIjwC@I7^e<-3Vtf3*Me^u0P*)n}u+5 zBKRktGgp?s>q}pQ>|IgvR+hU;lXaQ9p2V#pZwn-BW5RVihKOlt19pfvQ*O=-@g#hi z6|(vDUw!=_p%e{CvOy2P;`RSvu>1U3W&OY3fBx)Y{eK(J71sZ9fLkGn* z&;s`doxy}mqnD&ad`13J@IdnbP;FtH#S_6Xs+~a^43rp zXseOR72O(^ZAftFSVW2G4GtsZg4VjS@;oB=IOq#L8;QuW_5CvR%ImA`v>~ zScnw~?$4x%A*n&?UguK)WMVzsGHw@d{@Sm^UaFaUSh!%dJ+HKEWr>_o=fMXj( zv!Yb-A&S<}H`kxs1LD@pujTU7z((N(#1Li*zkD zH%GO@ljAeYXC%Ta(hjA>Ral*b{Y{XN<1rDEug`z2bvH-Gzbn>eRH6hKt=!zGcE z%8FWnu~dEvmlG0AK#>{|DgaSHajcz_D1qSf*RKIU9^@K!Y_zFDiwM7MBme=&WBf@w zc7aeKF^YkWv>SHRjstxC3PT*BOke|<(6MGu)$a>z0%ToTLgC-{!aX(nWQkN50D^!} ziTMacSW{%gG8zYfIcJKKpS))<#K7^C&2S8t6Kn({LJ}ox{8+%0P%_PE(`Y<-vZ7I) zr77c5c(|72!J|y@d3*MAzx(HZ?mYSV|9<`FldoSN1+ZNr4}1a1&Lmfqu%3DaN3+i& zZo6w=*?-5U?*G+{wXxW*KP->`+8aC@RQ$isUhM5X_=g< zSvuw@R&tyP^{?57OOX&d9NxypfSe!+3ZzzGLu|^0oz71Jjq!omsGOmM#M&5mqZ;V< z;oxO$+oUKYY-CNS8cuMiLFU^@`rG$<7b!(@5=s_2tor=`0)ZorW&YzWn&JS05y45U ztC*tQem^MY=Ng8Co5FCg28M$i!x}19lFcCD9K5Jw;&&_GYHJvgItg~lo7&1IICXKu 
zu0cos2g{5+rku@4 zjA70)4pGK66$<&S2)VK=xNSUWZ}g=#B}*@ZUno?cw@hNA9PkWNKqHAcaIB0glq9$MMN`evrQ#4l$KrxPuVJ0Vd?%rPDRN0J>DkDWm)tebhvMGirL1Zd6g7#lC zE+?1+p(DnphC3rfRr_qHkMhKTWPYqBW|1hJs~Ka8IZApHcG;DzCppW82^JH^WOss- zsR@$r5+O1ydRs;3yN3O`mlKlUZayxyyd4>Fg)V+qsbu3NO2z67`2|LZB)7h0{qSt? zsoQ^KipHCZ{r|&q`|sJa=P#=E-~Nji5BA?}JZsp0p4TPiEL93nu?pB=p_=(&8A<;a zz&03bL;rcom@MMtoc$vw*p!CA?A|+*qHQ)Vw~*Uu*0D5KHj|nG!@SMVeew&ZOc2Rh zio1_R1&PL$QtFMjr1kwd&4COyHo-t}7`foDdKX@{)M$-5@P3m~1(@&b8A0%!=zS*+ zbm&{DSd}ii+wE>64atF7P;nKPo@K7rEV{rPjx|H%eLmCHFeuOhwqu-f93hG01JDkZ z5?^pkW6V#oBr&ywbKC4JVAr%LUSZnYFmC1B!PE9{yE_F}mt#~`Vet}Ho{{OjS4^qV zQ#7sen$LmuV1mz5K8?D06k@G~mw?h#ja%t$*vN?GzdEI+O{YrL?3eVG5FsPj_9I6< zFgtoYs$@B+U|MQex8ja&>w_H?JW6P6j{mV*L?Wq_j$LUhbJ$Jg3z)nn85?Ky%A#-y zdpF>-oXPE1x3E(>+5IjJv@6jbTxN%~vD^j(q_!&{)5N+4OvlSkfdE3Eer}&&=e~&9 zt#{`h3F}>(Ya4$W?0*}&vAG4nQu}|fTRs2VfAQe|y_IL3_}{;2zlFpwMA3zk2u#(K zq#Za!DB~+dk_;1Uw&hTg_yNL)C&zC0N%*ec`W$5$(6*9m6JfTvttZrewSU=fCydMD zoBgZmpFHurksi$45gF>)71#(61^kH!J;B6If618Iiff>r6)`mOz z(}+MRlHHQZ0VGtGq3smkMhQc5|5^3BYfiIjy}>Vv#TsVH#jCm#6H*|#G6QU6fYJlX zWPCqQG3fma#m{z!8OJLq2Dcl1Vxa_P4qwKy^=clYZ8t>7{Auk7nOca<2qgk94%RTl zxmuKk?b=a-gwWdNTJ$`wKwFcp_3DcQkk%*fjZ-%#ZiKn)P7y_8y(3{aw7g_#T6@0# z;S2;BQyh0x1u`O$vY|kAQB9}BEa)s@{OX-kJSx(9`K>8U6|+e6GBs{JOXS{GKA>G6 z%<{oiuzxh5)ii*GEY^A+q!w?S)6-tZ7q90QwCZ*#jjEa;@jaeH33N~;M3iJgVs5rV zxKi^gRm+Qe`syLNr17cS|MoEM?fL%)gPQ&SZ2$T52mAjvo;Cdco-HpVqA}vJIq)r7 z)q&+DSKR1;`XTJx#%Vv#Z0Dj0PNt!lnB0X6s_5^wPQ5+7!Av7-SGnzUXQCpUj$-6G89`xt8 z{?zrqVTui^|E2z){k`W^{r~(S{_l34HT3_RPpQ&S%A%##L81eVb?WF=HRbD=Zc)S@ zh1PYoHgV>sD*0}PIVUlOFUoAA5$x~jpJaNLjYi}X1l_t#Z;(9V7)gtiK~v%EqMng3 zq2|MRfx4N)mUX~RDD_Rx?sv*|_B&#JJ4eJ9;5bcCgdzB20Ku=p)jNf9!i};dnZxfH zO2~+)g-M#~&W4?j*g)6irD{WAF7gm`!KT>v$`z|dvusJolvpQ}oDc!qmQ=6bo~g3h z6PDs=C+r-LTr8LYk_Xv)v?qmwOVeG4(JSOFwu@=53f2~qj!)djoL9P)BZ##EE79Fw zLO!B4B4A~H_3+HeVW5O-`P2Q>%(j zHSV=6LXBUqElQ10H!V_!Ny0AusM;dWlMQobhCDGgN7|-YB1yL>RHOyaXN4j*9qQ;) zmp^~`q-^s5g6TZ}xoCs80;&sdN-ui(*^_svITdR5 zNEJ55?!f*Qu!|zp@4Y({U`%G1x@AQsFt=Iua%;moFsh2UsgY3}f}RGiaUyPqBBruVbUJX$khmaF~Ju5~_#HV~);`ek)~ zv8GPF-kPh$u00Z6<@;iO*O64Uo#$e2&D5v9jpc9-nwdE-P}MsZ zc)pU_(Mcq)p><3W$_uRHn1ct^ayzQ!0?(U|1itlrPO6a87`p0m0oS9MbEyJw@X{oC zF9~jg+>}$z)+DD~qa4;)hp1Zo5v8d+DBwk2DqLOyp)vlnsMez#>Gt5srQ+s5ESI+Y z5GUinvE&5Lu;ML!PBGya#dCLbt@SkIhIv&NB@1XYEkAf8f~ zg}4fbv?EVhRu|&0V?VWk<|2!H72ul=ilh@{D`fa-wcpsvXiijHu(8v7A)IQ_Namij zRLEs}d->H-k2!Wc{GzG5#boi@A3Sv zoK2Nh_pjg)0fZIR?@eIEa;2@=4=#^=Ys0#PYWjQ6z}C}rV;>f_ZieSIl1Q(%62>mF z6oQ$WzMZZ9sL>f~9Eo;(cfweK@XkDXfPE9rhL8E5{fmCe?BI;C=F*=VvPe4{t0CpW z`@+EfCHPMK5k?=V%-yOy%vQPwXo+<%5? 
z66&0NT;Y!(G>b>gL$L2#r>JXEXr5O=#m9nW6FzvWB%5(X&R%k7;%HW?# zp7&%sVV9W0wwfajlXQY$u(Mo80R1A|j-wgWD?L zG5&OFHtG+PX&KW4E5U zrRvSKNdaor{Cm5~Exue_Y2g-$*46b|_R}{0U2g-GitJR}%5Dz#%a}utzXZPoex3#2 zu2@#r7&6V6wf5#=GP!?GUH=!^95yurEYbh{{eCU}t3#?7g`$5SJ*SeUCd82&bqz#>60r2(f%SYQ6*$_uE(Zq3%2QO^*^XKLOWAnA}&e$vrcC8Lw2EPeLm2!L; zeAX`SP?a!;{=a_x8o(DAB7ygxL02cat2$|3Wh99K(;3VVH``|>yabuJ`=ujSp@JDU zHx$!(H{S?eJ{A>Fo*Ptu(IHT2LbTUIs2@5fw6b??hrJrwO?ZR^{%ftO_BM8_xv{Ek z_>)8{v?3i=TZwHj4$BaB8Vq9nXW_eSh0+q2A&aSZ#M#tMmSzN(j@-hHn>E7cpjjpdDC7!+|gR`%?cQ{YwBvypr^{7v7fT-jd8i`mi4TdKwDcyPK68#Sp3 zyXSaDu!+RSR1$8(A=n5Wei+5y-6yO<#m$28drk z<8p`WOpRZ$_h_nUh_JvnYj%4b4ZU68QcMOgZcAE^s1_j|?vnszv};SR!gB4Kl$E%h z3p^i``A|petjmd|_Nvidw)=VmKL5t?40D0MS1%7R6@nV`rg?>555&TxF441!*d^=y8VBNI3;wv!6m>_|8KwM|J{A|aR2wMJZsqh z9}Qyw8nR5<3tu=(5`78_jTf2yIZxU0@6mWc`tMW5 zLhl`$Q?#1B`-|BN{wtv|1YQ2iFYzBAeGwD1`+WZ(FzH4=9-h8Ee*3orI4yb)F_t)z zIEHOyl6#56zkWSXZ&aDDUw58@H92U0_k~I5XSU3fIdH7HiKz{rLo-I=I<@WHV5z>K zOb3QxIwPD>b8JEfXSd%#VQGW8aF;oB??tfm zevOb2h{haUYcg1(|DX2<`xX8FV*la%-|akW=zkjtgyG0>MUfe=xH1TFb4_9Gimz2- zr3!w_0k%1ZT;kU}n9Bw6msbp{g@BY#m)LS9?4#(cOR>(ejs0*j@Hy7G`$Kdswx(Sd zW!4m#9_E4RMN96`91=xc#uZsZcx_gF2d2NRbL6VT_12)3qc06BngW6aSvyu$f@ebj zR{~_pRv^T~p9JP79FNGSmB3u!d1xwCAi0P^KbEjq8x`jlDs7g-+O%20xG@|aRnPtC z47AlVWO|Qv$9=Q}F-a-QCxvQ}eWL2TS|=*9*4@^S$Oh&OmLXR7!Rp_d;~R?&nB_V^ z=Qv1f_vJab4Iz|%Ok!h5;(`ne*(Z_^#OK}t0pruwSsBQ9BAv^P@ydz6h01NIsJP4_ z3)IyesEws)mmF+ifuhGT)llF9rjf1c89V3iu0$d%0=Y7vO%1%2+@dS%jlI)u&4a%B zU^qA{jDqhfY#$NVu?4g*`5cwzYYFvk@_1jD?k*zUjQ3hJ_oo79LFBfAO$&VQrO)rg zk}iig*91uj|tz1ZKa=6`&#_v~T)cPr0T)_*@Lu6X@w z2X#mPfj3h&Q}CE@4pajYiUEl&c)Xtub8F z3b`B@>EEzc!eOT#;J#@mgrjtw$QyM-RO9Xa?wbpQeC-o>vJfmFLW@wfb6-RG6_~K7 zbpvE-!f7$Z{--!{R+#}e=F8cjF|Z2WS8(Pyunq98;>cMM%!b`Kt`lpF`xRX{ZBVZ1 zz;R&jx%TTFDr{s5ykuA*ZZ*o)%^p}jN zuO*vC%zuBkZn!1>A4bRQhW$T-{fbmX|7G(|Aj@+1 z#|h;3w%rqC(7vWG$ZzU~971*SuICW4^*7=WD#L1V2rYKwltHw(ah5ujDxGR~D&04S z&}$^o5Q%F2*uK6khte5#=RE4JB=c=4q{B>3-W0sz!O8ie#SNujziDT)b^Ikh7E9Km zH{ks;J_sL$d+8Rls_uV)Tg+7Oh3q=5=5Urf;}mCAx?I8K=6!ON8SP&Avt!iv+&iAH`ui-|PjVs`N6`z0w`)&K)Y$w(j z_pAEeTA*Cl_h!M~bKl#_9-1}_`X)Uz8(!zM_FmCGM7XS6UcK`2-!QkQi=dCrHH@>E&bKd4WO`Dqdm*aeu6_y72ZTpyX7giqatNNE3VO-a{ zq=DXZ-_puXjCRxQCS4deBVO0`To}3f#^bq~&FF7#G*@>K=GyRn$8lZtS?vEwnUG_S zujvF@68|;Wulj#>`-6w`f4B2o#s70+xZ)<9Kpw;08&6PyQ@cB;D04lZkk{7DIEI=D zyq0&!198nXFb$9zT}1inGB58hzm`K)+TOhtg`;Ch8{S%JL3B}bYmJVTusqq^p*N*o ztdX+q%lvxr_H}K8Y-}jSOdX4|d&N@78_2b|&>zKO^vZsOh9;`xv4Gg?IusT%=Sm}R zv+jk(J-bhihO3#+F^))#^E>XaLQKtw_Oc+0=b|~U&TN^$p}XOzl#}}c&%12Yb>9}v zYps{&>&)Wz-hYR!xq+8Zi?y4=DoVU{Qd!)CvN>H@#I)S0)L6=E$74^!Df))|rL}B| zZr|BZ9`!r(H7%b(*YiXyhv!XnhET27{I0>E)kc;F%r>u-T_W@C>X>@CH2aWt%H3J8 zo^EYpUBDZ5Re4=plVh&tv|0e@nw~3vurA-Y_HXj{21nhW?Ad>Kn?CS&08vlC+hzYqL;ui#MQ2d}H-$s-$f0e#2bc z*q_a5H=odJYPu@aC7?E}@BG%%tE;@mwe2^|F0Fl9!ZIx^kygaq=Z8h*?#6$-*#FCM zOs;v?=l3peBJ ztr2rIx2&nQnn$)Z;I0JZ0?$R5>arC;`pOB!$-i*?0dvvfqALOOs)RFBCop#*wfnH8 zvM~7^wu|65eI-(*A3;+P#m>@N^$Pg+#;!TnV4J(W)vjJ84y@`dZjJ)02MNYR_|C_* zNR_(~mlMe^x-xl6SpFpNVJ0W*kEdnfDQ1Ub>vjjA!nWbPF-l`;{rJJu-IUyumfr#;lO`X}^7#O&x#C7L)#Hf};zO@4{$$ zYy36D$HGxm7<*jzhi zE(}_1FV%p(7W9g9M-|(}<_8(IHSbHTV(%rwc-QNvZ~a-k{`cpyt9t>KtpA_w@Aa$k zU%StrKdk?6rDv`^Zlq097wT6aSB3#nZ?bp2Y<6Q8BISHKuk%#p-#Z}nrA_7?bM?VDn@Zx)#t zNz~j#d)&hcc6~8bnH!_S+&flUFfHdELFl-)Tqt-r%m=!4U1aabZ@6B1HS;#-GI!4xgX6?eqvOK zgPqDsMaEbjWt?MLI2D7S>bOBP&NYYC7#07CYpIQDy3%|0hR!}9?gvS>pi!)0;C-=N zj!Y55+6TYTU#N>l{7(Er8&*1|af7)UIZJ_XrzOy<>;W*~AActv*VS5nK{JYDc--wO zvq&wsUOrOAtrk#NAR32i6;`lDG5X#+N5C2-?#H^u+W+AMCsQ(}jN{F>vBduGKi{q1 
z|GT^Y;>Cmge;dyhoFJ(*l~BBGI6zH16owf|Vx`4X6kVV(7GY-#-cN`Ck)zSJ5I$Ln0Sr$GVok2{v=!9b;d-`94ekUCMi}&=u?!{!>Q~$U>#fX5BYy?<=KK)L{c;* zP8xn7L$0irBQ=qAI*0J{-*GZ6zy9a;M9Neg^m=OA2*soqvq~ZyP8`MEId8WTUY7<48l+0Cl__Y1d)u-55OUA8?kMM5u5hxZ`GJ?afrmE^9U6B zkT*PsELC=DUP|MIQu4vII~#N^2#pUwt>DS{N0fG^SR!RTE0hhw4NE5VL4>0$NlYxH z-qCiorgLK&LGX8+Bn&<>p2UGcVrvVIspKrq^fpANqm{HVL{iRXB*p@eqq+0+seYxH z3%Hz+XaW(U$^_Exe|^wlh7* z)I&2w68+6CmMr&Mvg(kLh^1IBreZQ0VQuIN9GOLnYIX|_MYF9%+^iW%6S=;?xgV1J*LUv%>zfeHRTsQEJPp*p{}t?< z!$MRhh{q*V)Qj6jC1tTAN{uQhiybCC^_xo4gjtSC#HJ~un93ZX!mn~(W|dY{3{h%S zxj9s{KpmkJ4N0=1NHqw{m1J!7r%4)@3*7ZZX%h|jqaAs44wd@0)8KivyLR-IhLnh_ zb-$;M-Hz`mJ?2r+-9eS??AVRO(1(rLZ;*;>)Oa)*W16uKnIHSL@VOY%C z=S_H`H03PiM0-K=qIyh z=1r)sil>^%aFlT(=N8ya;7PH(Wc=c!j_#^>S_tdmxHg_BC1;q=NQAGdmkv*k9fTVk z_eqMb$)A5`ocxPXi4r(rF&t)+i3laj%!hULr}d|))kqmxho>;1FvRSh=rxpHy3C5X za4+2Z<9f7g$5We7Z#ZXsE#v9Nt)@*qYp;b?gSv+OcH@TIrkICBXJ}ab+?kSQ zFOJyOcsxbp z{-t> za~G5QH)Bu~Pe~x{!g`3J3ryqlh9Q8z*>ER7PB1OYgRKRaD#0{+HEP;tMvMa)JJ3f75<<=F=WRqKm4X{3?tc@aMwrVV%?{!z z{yoD&7IsyQsQEfT)#}=fM?jP^X^;9SIeGuX83;0_wj3FeNKeyxXAVoZZRh0Gubzv) z_p-$-bDqVbV1o*~6{2ce;0H9s$xW>m+M2$?md0jht!0}PiKdD>commDR^E1mD*&d# zS~OVf_OTe~peQp&N?n*=AelCt8HID>w+}^%qfp=kN0RY!Ew!dN7qh5b=6sPuM_a0^ znTMO=jH_>RZHhCN@q~%GFp9eYcSUBU9;g?*oS%zSqjm1ofhzHChB+s38PqbKD93%2 zy_82B8-vH~g{fk5_&r038n_rxnrhq@x=~ldK>wSf2$%L74wHmk+9Q@GE;cV`(@HYa z=@>6yl=W!fqvFw^t?z(hZS9#!R!Ed2S_MBA9RR(ztXQt8P`<(S)tZkl4JNSO*NfirtEvxe6#n9|ai&x)Rd|CnRn3!O zm7kiw&a#m{&m2tW`A^>$qMc!HO7;J_z2yK3n~kHvEw%~ntC8;l&-0C(oP74U#RXnt zgYH`7%d-jVW8qD ze9e(q-_qmeVaYfe0yzig4!v@;5QgCak%rqe} zjc~J~^a?a=shkvF|GB;8m2KHp(H!(XHEC%#Wy{@Tt8GK();%_?d_I(Ek|))pG3JI9 z)i04Bk=BPr@+^>_^Uco}gP5=^iNT;?67{%dp5}{+5y!u0 zn95}CDo3S7$N8gkHEXKXh~?U!Bw6k)y4l^LjWPNP?u8pa0l5#__6IO3_4{Bpso8bvi zM!T-G-;KIaeSznLa_6)u-PAp+wLTPQ|F00@fA`^P_jqo$Y zB`G=l&F2-8%-kf?mF%*Wl0${`I;m*R?QHhcZvCoteV#I+yq-U%5Fw#`ZSx|!@u_$8 z&;%oni4kTc7%%u+L6y5w?1SepFCi zzpCVSzxuG&tuu>fw~)F8sZP%e*~ud*oB$b@nO-Xmv5B2m_-Gd#kv2j|0HZd;(wt?6 z0GYsGv9PNqw*zR%bVK@R+4z+*Nr~L2yyp1no0K;-dX3|!vqIhR=qViT(bzRzxO%?! z*m+m(Ze#%KW9QuwdA9@TUH5V;qqt=JDt9PcQTDtWGp;p$^>H)#&+m4~yzI-jBXcW^ zTZDu_H0G!@cI%GYk%O`!y;&c>9M3x*!yJ#>8pmaG9;0J+eVau=dHrewk_U?h(IR`- zQ=h@&b|*jWius5*C3L*p53_>L7d+ABdxg;VWVfqYE2LOLU(=@>c5bIhYVV#bbiD^kwr+G=u z$i>MiAF|EZ)T3iUKXp11jhBHna3mW0U}5bT zu}5=22K-50F~2oIJG37kCYNX~9s?N_cqp1Z)`x`Tq*lm~CO zIrNF3=-BDlJ#oF=y^J^iHete~?ky0M@PQ@S6qh&7pThCT@m|mko`T=ZTb;Em2y9xb zm)-i*=RT>UM*Dde} zTOq&}XbNbgxAabv*KE$Sk}hm`3^K|6jj$N}Z0=r08)NhBjd}rn7^Nvm{+= z$&X-KKBE*q7^zwQNa>? zovTLr{oB24q(8XAbt5a@H?s7KxoclMqQ~ZVq%_iq@kq^_Oyv7LX1X&OG?dQLnKCdo zsthHVOHmp&n^)DI*8pYz!gOx$570^1D?{}%2%c7``7#JJ&p)mdT`{YBwSh-Y)PGeV zDw*Efv(fu6M71F^Y?vAsW}N`(LYJxTlljIz>-61bUhT$BBOioOtUERGsM|%&D8d;OLS$Dq-bGHuDBnAdXlle{vKmQ$b zyg2E#OLnbT0Hwrs$^?_`=yh*|sN$&@rc6_*SnV1|kfQFnpRUYA?RC2FlcIvLgKT!; z^BPXGy|9GV2>ssMvZcEkxvu5%CD^D2-#NPFhGv z&PIm0j;*V~TFx$0Q>Cm4(({f&=<_3n5nPsPD>qLyJQIFPA4?c1#f9h`h!7bTt2b$E zbSp)cPep_&;)LZXP4j#JNR(^G3=7q~Q&>(K5uK}XEYK)=VdCV=vgUWq$c#c;EW*1? 
z3_Fu>Ji^?msh`O6_v@65%@Fst@}J#I;BJHj7CTl- z>?b-lgXj7n8O-(+v-~9RC`%wA^gsla@}$mWv^yvAquUFh$V>I|xs~?GF1ol&~^Oug|mnoWS*2{Fv5fMhB z@uvuwR6d-A=1VxX7UhneqGDI57knwRZ zF{MQBhvTC+Z_nOzch$806HTxX#RNH=o4GME$=J~)DD5%k*l2Y^;gS=nZyOP8Brg%i zotOy8$uN^80y(UR0Kb4z#uS3XGdMmA;ML*T@!8YP$K&^ZfA`aS_;`4FdieJJ@tZSv zcM3=E-o8G5fBf$48NB-|9KQVre1H7*^;5vaIPyNFI-dmNK&E;+4?AZzIgD#O_ugw) zEQugNbey3vhB2FAvq_L*J|)6Tkch^egiMLcPf_bm_zhh=^6)%756_)H|33f#|NrB> J+Z+IZ0RY0N)YJd~ literal 0 HcmV?d00001 diff --git a/ansible/roles/helm_install/files/redis/charts/common/.helmignore b/ansible/roles/helm_install/files/redis/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/roles/helm_install/files/redis/charts/common/Chart.yaml b/ansible/roles/helm_install/files/redis/charts/common/Chart.yaml new file mode 100644 index 0000000..3f32f99 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.11.3 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 1.11.3 diff --git a/ansible/roles/helm_install/files/redis/charts/common/README.md b/ansible/roles/helm_install/files/redis/charts/common/README.md new file mode 100644 index 0000000..8dc47f0 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/charts/common/README.md @@ -0,0 +1,345 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
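+
+For instance, a consuming chart's template might combine several of these helpers as follows (a hedged sketch: the helper invocations match the inputs documented below, while the surrounding manifest fields and the `.Values.image` path are assumptions):
+
+```yaml
+apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+spec:
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+  template:
+    metadata:
+      labels: {{- include "common.labels.standard" . | nindent 8 }}
+    spec:
+      affinity:
+        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" "soft" "context" $) | nindent 10 }}
+      containers:
+        - name: main
+          image: {{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+```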
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-------------------------|------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. 
| `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`; the length, strong and chartName fields are optional. |
+| `common.secrets.exists` | Returns whether a previously generated secret already exists. | `dict "secret" "secret-name" "context" $` |
+
+### Storage
+
+| Helper identifier | Description | Expected Input |
+| ---------------------- | ------------------------------- | ------------------------------------------------ |
+| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier | Description | Expected Input |
+| ------------------------- | ---------------------------------------- | ------------------------------------------------ |
+| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`; value is the value that should be rendered as a template, and context is frequently the chart context `$` or `.` |
+
+### Utils
+
+| Helper identifier | Description | Expected Input |
+| ------------------------------ | ------------------------------------------------ | ------------------------------------------------ |
+| `common.utils.fieldToEnvVar` | Build an environment variable name given a field. | `dict "field" "my-password"` |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` |
+| `common.utils.getValueFromKey` | Gets a value from the `.Values` object given its key path | `dict "key" "path.to.key" "context" $` |
+| `common.utils.getKeyFromList` | Returns the first `.Values` key with a defined value, or the first of the list if none are defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` |
+
+### Validations
+
+| Helper identifier | Description | Expected Input |
+| ------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ |
+| `common.validations.values.single.empty` | Validate that a value is not empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $`; secret, field and subchart are optional. In case they are given, the helper will generate instructions on how to get the value. See [ValidateValue](#validatevalue) |
+| `common.validations.values.multiple.empty` | Validate that multiple values are not empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) |
+| `common.validations.values.mariadb.passwords` | This helper will ensure required passwords for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the mariadb chart is used as a subchart. |
+| `common.validations.values.postgresql.passwords` | This helper will ensure required passwords for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the postgresql chart is used as a subchart. |
+| `common.validations.values.redis.passwords` | This helper will ensure required passwords for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the redis chart is used as a subchart. |
+| `common.validations.values.cassandra.passwords` | This helper will ensure required passwords for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the cassandra chart is used as a subchart. |
+| `common.validations.values.mongodb.passwords` | This helper will ensure required passwords for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the mongodb chart is used as a subchart. |
+
+### Warnings
+
+| Helper identifier | Description | Expected Input |
+| ---------------------------- | --------------------------------- | ------------------------------------------------ |
+| `common.warnings.rollingTag` | Warning about using a rolling tag. | `ImageRoot`, see [ImageRoot](#imageroot) for the structure. |
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information in logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
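+
+A hedged sketch of consuming an ImageRoot-shaped value from a chart template (the `.Values.image` path is an assumption):
+
+```yaml
+# templates/deployment.yaml (fragment)
+image: {{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+```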
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+  env:
+    - name: PASSWORD
+      valueFrom:
+        secretKeyRef:
+          name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+          key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+ 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+    export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode)
+
+ 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+    export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues.
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore.
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3.
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/_affinities.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..189ea40 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+  - preference:
+      matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+    weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+  nodeSelectorTerms:
+    - matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+  {{- if eq .type "soft" }}
+    {{- include "common.affinities.nodes.soft" . -}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.nodes.hard" .
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/_capabilities.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..b94212b --- /dev/null +++ b/ansible/roles/helm_install/files/redis/charts/common/templates/_capabilities.tpl @@ -0,0 +1,128 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. 
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/_errors.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty values for passwords that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty, no error is thrown.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+    {{- $errorString = print $errorString "\n                 Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims."
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/_images.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/_ingress.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..8caf73a --- /dev/null +++ b/ansible/roles/helm_install/files/redis/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. 
+ +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/_labels.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/roles/helm_install/files/redis/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/_names.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/_names.tpl new file mode 100644 index 0000000..cf03231 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/charts/common/templates/_names.tpl @@ -0,0 +1,52 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/_secrets.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..a53fb44 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. 
+ +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password
+       (A new random secret password with the length specified in the 'length' parameter will be generated and returned)
+
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }}
+{{- if $secretData }}
+  {{- if hasKey $secretData .key }}
+    {{- $password = index $secretData .key }}
+  {{- else }}
+    {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+  {{- end -}}
+{{- else if $providedPasswordValue }}
+  {{- $password = $providedPasswordValue | toString | b64enc | quote }}
+{{- else }}
+
+  {{- if .context.Values.enabled }}
+    {{- $subchart = $chartName }}
+  {{- end -}}
+
+  {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+  {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+  {{- $passwordValidationErrors := list $requiredPasswordError -}}
+  {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+  {{- if .strong }}
+    {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+    {{- $password = randAscii $passwordLength }}
+    {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+    {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
+  {{- else }}
+    {{- $password = randAlphaNum $passwordLength | b64enc | quote }}
+  {{- end }}
+{{- end -}}
+{{- printf "%s" $password -}}
+{{- end -}}
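+
+{{/*
+Illustrative example (not part of the upstream file): a chart's secret template
+could combine the helpers above like this, where "auth.password" is an assumed
+values path and the key name "app-password" is a placeholder:
+
+  data:
+    app-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "app-password" "providedValues" (list "auth.password") "length" 16 "context" $) }}
+
+The helper resolves in order: existing Secret data, then the provided value,
+then a randomly generated password.
+*/}}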
+
+{{/*
+Returns whether a previously generated secret already exists
+
+Usage:
+{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - context - Context - Required - Parent context.
+*/}}
+{{- define "common.secrets.exists" -}}
+{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }}
+{{- if $secret }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/_storage.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/_storage.tpl
new file mode 100644
index 0000000..60e2a84
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/charts/common/templates/_storage.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper Storage Class
+{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
+*/}}
+{{- define "common.storage.class" -}}
+
+{{- $storageClass := .persistence.storageClass -}}
+{{- if .global -}}
+  {{- if .global.storageClass -}}
+    {{- $storageClass = .global.storageClass -}}
+  {{- end -}}
+{{- end -}}
+
+{{- if $storageClass -}}
+  {{- if (eq "-" $storageClass) -}}
+    {{- printf "storageClassName: \"\"" -}}
+  {{- else }}
+    {{- printf "storageClassName: %s" $storageClass -}}
+  {{- end -}}
+{{- end -}}
+
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/_tplvalues.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/_tplvalues.tpl
new file mode 100644
index 0000000..2db1668
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/charts/common/templates/_tplvalues.tpl
@@ -0,0 +1,21 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains a template.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+  {{- if typeIs "string" .value }}
+    {{- tpl .value .context }}
+  {{- else }}
+    {{- tpl (.value | toYaml) .context }}
+  {{- end }}
+{{- end -}}
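+
+{{/*
+Illustrative example (not part of the upstream file): assuming a chart value
+such as `podAnnotations: {foo: '{{ .Release.Name }}'}`, the helper expands the
+embedded template before emitting YAML:
+
+  annotations: {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 4 }}
+*/}}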
diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/_utils.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/_utils.tpl
new file mode 100644
index 0000000..ea083a2
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/charts/common/templates/_utils.tpl
@@ -0,0 +1,70 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Print instructions to get a secret value.
+Usage:
+{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
+*/}}
+{{- define "common.utils.secret.getvalue" -}}
+{{- $varname := include "common.utils.fieldToEnvVar" . -}}
+export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode)
+{{- end -}}
+
+{{/*
+Build an env var name given a field
+Usage:
+{{ include "common.utils.fieldToEnvVar" (dict "field" "my-password") }}
+*/}}
+{{- define "common.utils.fieldToEnvVar" -}}
+  {{- $fieldNameSplit := splitList "-" .field -}}
+  {{- $upperCaseFieldNameSplit := list -}}
+
+  {{- range $fieldNameSplit -}}
+    {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
+  {{- end -}}
+
+  {{ join "_" $upperCaseFieldNameSplit }}
+{{- end -}}
+
+{{/*
+Gets a value from .Values given a key path
+Usage:
+{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
+*/}}
+{{- define "common.utils.getValueFromKey" -}}
+{{- $splitKey := splitList "." .key -}}
+{{- $value := "" -}}
+{{- $latestObj := $.context.Values -}}
+{{- range $splitKey -}}
+  {{- if not $latestObj -}}
+    {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
+  {{- end -}}
+  {{- $value = ( index $latestObj . ) -}}
+  {{- $latestObj = $value -}}
+{{- end -}}
+{{- printf "%v" (default "" $value) -}}
+{{- end -}}
+
+{{/*
+Returns the first .Values key with a defined value, or the first key of the list if none is defined
+Usage:
+{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
+*/}}
+{{- define "common.utils.getKeyFromList" -}}
+{{- $key := first .keys -}}
+{{- $reverseKeys := reverse .keys }}
+{{- range $reverseKeys }}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
+  {{- if $value -}}
+    {{- $key = . }}
+  {{- end -}}
+{{- end -}}
+{{- printf "%s" $key -}}
+{{- end -}}
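+
+{{/*
+Illustrative example (not part of the upstream file): with ".field" set to
+"my-password", common.utils.fieldToEnvVar yields "MY_PASSWORD", so
+common.utils.secret.getvalue prints a line like:
+
+  export MY_PASSWORD=$(kubectl get secret --namespace "default" my-release -o jsonpath="{.data.my-password}" | base64 --decode)
+*/}}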
diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/_warnings.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/_warnings.tpl
new file mode 100644
index 0000000..ae10fa4
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/charts/common/templates/_warnings.tpl
@@ -0,0 +1,14 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Warning about using rolling tag.
+Usage:
+{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
+*/}}
+{{- define "common.warnings.rollingTag" -}}
+
+{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_cassandra.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_cassandra.tpl
new file mode 100644
index 0000000..ded1ae3
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_cassandra.tpl
@@ -0,0 +1,72 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Cassandra required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret"
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.cassandra.passwords" -}}
+  {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}}
+  {{- $enabled := include "common.cassandra.values.enabled" . -}}
+  {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}}
+  {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.cassandra.values.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.cassandra.dbUser.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.dbUser.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled cassandra.
+
+Usage:
+{{ include "common.cassandra.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.cassandra.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.cassandra.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key dbUser
+
+Usage:
+{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.key.dbUser" -}}
+  {{- if .subchart -}}
+    cassandra.dbUser
+  {{- else -}}
+    dbUser
+  {{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_mariadb.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_mariadb.tpl
new file mode 100644
index 0000000..b6906ff
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_mariadb.tpl
@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MariaDB required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret"
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mariadb.passwords" -}}
+  {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mariadb.values.enabled" . -}}
+  {{- $architecture := include "common.mariadb.values.architecture" . -}}
+  {{- $authPrefix := include "common.mariadb.values.key.auth" .
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_mongodb.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a071ea4 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mongodb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mongodb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
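+
+{{/*
+Illustrative usage (not part of the upstream file): a parent chart bundling
+MongoDB® as a subchart could validate its credentials from NOTES.txt with:
+
+  {{- include "common.validations.values.mongodb.passwords" (dict "secret" "my-release-mongodb" "subchart" true "context" $) -}}
+
+where "my-release-mongodb" is a placeholder for the actual secret name.
+*/}}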
diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_postgresql.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_postgresql.tpl
new file mode 100644
index 0000000..164ec0d
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_postgresql.tpl
@@ -0,0 +1,129 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+  {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+  {{- $enabled := include "common.postgresql.values.enabled" . -}}
+  {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+  {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+    {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+    {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+    {{- if (eq $enabledReplication "true") -}}
+      {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether to evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+  - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+  {{- if .context.Values.global -}}
+    {{- if .context.Values.global.postgresql -}}
+      {{- index .context.Values.global.postgresql .key | quote -}}
+    {{- end -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+  {{- if .subchart -}}
+    {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+  {{- else -}}
+    {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
+
+Usage:
+{{ include "common.postgresql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key postgressPassword.
+
+Usage:
+{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.postgressPassword" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}}
+
+  {{- if not $globalValue -}}
+    {{- if .subchart -}}
+      postgresql.postgresqlPassword
+    {{- else -}}
+      postgresqlPassword
+    {{- end -}}
+  {{- else -}}
+    global.postgresql.postgresqlPassword
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled.replication.
+
+Usage:
+{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.enabled.replication" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.replication.enabled -}}
+  {{- else -}}
+    {{- printf "%v" .context.Values.replication.enabled -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key replication.password.
+
+Usage:
+{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not.
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_redis.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..5d72959 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardized values structure (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_validations.tpl b/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 0000000..9a814cf
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,53 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
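+
+{{/*
+Illustrative note (not part of the upstream file): when a "subchart" name such
+as "redis" is passed to common.validations.values.single.empty below, the
+printed flag becomes '--set redis.auth.password=$REDIS_PASSWORD' (assuming the
+secret field is "redis-password"), matching the parent chart's values layout.
+*/}}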
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n        %s\n" (include "common.utils.secret.getvalue" .) -}}
+    {{- end -}}
+    {{- printf "\n    '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+  {{- end -}}
+{{- end -}}
diff --git a/ansible/roles/helm_install/files/redis/charts/common/values.yaml b/ansible/roles/helm_install/files/redis/charts/common/values.yaml
new file mode 100644
index 0000000..f2df68e
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/charts/common/values.yaml
@@ -0,0 +1,5 @@
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart
diff --git a/ansible/roles/helm_install/files/redis/ci/extra-flags-values.yaml b/ansible/roles/helm_install/files/redis/ci/extra-flags-values.yaml
new file mode 100644
index 0000000..8c1dcef
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/ci/extra-flags-values.yaml
@@ -0,0 +1,12 @@
+master:
+  extraFlags:
+    - --maxmemory-policy allkeys-lru
+  persistence:
+    enabled: false
+replica:
+  extraFlags:
+    - --maxmemory-policy allkeys-lru
+  persistence:
+    enabled: false
+auth:
+  enabled: false
diff --git a/ansible/roles/helm_install/files/redis/ci/sentinel-values.yaml b/ansible/roles/helm_install/files/redis/ci/sentinel-values.yaml
new file mode 100644
index 0000000..48dfa1d
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/ci/sentinel-values.yaml
@@ -0,0 +1,6 @@
+sentinel:
+  enabled: true
+metrics:
+  enabled: true
+  sentinel:
+    enabled: true
diff --git a/ansible/roles/helm_install/files/redis/ci/standalone-values.yaml b/ansible/roles/helm_install/files/redis/ci/standalone-values.yaml
new file mode 100644
index 0000000..dfef688
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/ci/standalone-values.yaml
@@ -0,0 +1 @@
+architecture: standalone
diff --git a/ansible/roles/helm_install/files/redis/img/redis-cluster-topology.png b/ansible/roles/helm_install/files/redis/img/redis-cluster-topology.png
new file mode 100644
index 0000000000000000000000000000000000000000..f0a02a9f8835381302731c9cb000b2835a45e7c9
GIT binary patch
literal 11448
[binary patch data for redis-cluster-topology.png (11448 bytes) omitted]
z?FlB-z0B<5sp4XhVsiFCD!23pXetu_(~``!`vi6N+V0)EUqSA2@$jTkN{!B(Ia51V zQC~$G)9 zYeYmwrckuanlL?8u(o4U#qoQEP6w1;y}D)ie&`;d(?ut!bjv*l&7OQ7YeThN*2=zk zF=KHts7t|+D1+M(wcKMT8s+r`Ah^x8yOs<60_c?`zhDLm8KUmT?H?Qdx5)dH!?D_MFWZI#S$rq1v zB+wAf5s2Dv@Q!zXOEuAN&0x^rX?S5=J_%t|iN3u&ey6>3h?*0=V^7W~8OvepuYS zvHk<{P1I<&Q|vMhYh$B!B)(gK|5{m@Ze`yU$NDe^tnu^n1LmgXJ4J2#wb~hfxo<8z zJA2>w50|v&B#yC>B$0n!H2PB=3kroQb^j@GwmkvTY3y#FQ}dUJ=S&XqN==Q8$Nl=N z9^28KV?J*cH)DPl{V#bM3xnx-yaP3}NgPM4mWxYSY)bJ`M(cy1MRT<`7O_ zS&2B8pFj5K*SZ{Z1c=Sb&E4AIlqrGE!&jnn#BbWy?BtPi;`%AH>U+?v==j3B21pOa zB;M3b+DVT{dILn$>>~eRy6ei~_wNR=vB$oA`NG)Rzt$Jxdojmi*OV2Nl$a1Ul};_= z?PobfL^8X}%yo0bV>VW&u2)vJtu9TGzJ0Vs^!+N3k(S<$a8c%02z+;RZdn)^gotVo z1{PsuXUF#J(aM)~Je8T53HibT`LcEE))dMsv7wqkgPC?Jo;qch5&8iSAKyzjefpBwFgiTI=#4{#8O>d{c3I*NgCZ^R;=lN7vkDzJ4lIuE4zn=j{mtZ1sLs$wt;IpVe=%!g!U@8!+H#$KE(3~X|%7FAQ@$q{#L zN##by{X~*Agp7@i5eV4Hd~3^dNv*B=bB{e__HOSjw`YMB+|DC=`QE*~XAhd|!~FR8 z_+V}5F($YMJYDKqp64&E)$u*PG;18L#Q9MizAjh-arydnR*j20Z7LidKR(S;xIVZ6 z`9qfC6LR8|XW>^5JwXc_wE$g%13l{hqmSaUg!# zjYh}&`Q4OqD)UK$IdQm8ZeP>1$Ki0jow>QWBR~`Kg)d)rbbq~dL*S9w4vw^+7aF3! zkFHJ?GeCl|u~BJzRif_|-WkhKd)EjYCZWMRaFR-QW7#R#SmoLb`}iQQq6h2i?(Vw38r1=E|{@#%l6Kh&4Fnyf}Txn9EYy_yX2aTKPy=i-Trn`)RLyG(v zhK7c_{@|1P?E&_{z(9ao{Y#fFy`)T{W&z(%fKUONXTYj9kmY-06sQ9Jpy^lp^iU|^ z0LAml-n%J+>SBPj0Q#Kf$CkM?*S}f;8ZsRT``FlD?GE9vRAW_2tEzTwY-|9`ZT=c{ z%pwhLE@+L(sUPcZ!V|uOw&fy*U)$7l$KXAGX$WZ>8aE~-EzQZxdq{(M z_%cB3^@<9BqoM@rEVGZV@5szd3Zz&vWwaNvnZ+n4Xns%ys5%8Gx%A;fVU3HFHYaZJ z{rfiq)&V~_0LK%VcM2wh{7I&aSeqQ!!@PqZ0ZFKg)XGhCnc65)a zgoMOM$B_j+ZS9lOhYHStf~;_?cLO{ty^@tyjUDbExb4ZgYu5?F^Sl&FsU)adl#7cf z#BvbSMk7YzNlS}%W#3ri$<3u#t`Fur$E64#i*o5IMM3}LGPq5MqN9k1!n+e z_e)6pl}E#Gjx0Pl4U$XfHu0qn3Uy{0nToNtPQ0MxIZ8S5sgaJmq1U$Wj4SPRu^k}) z{qO}!``Lr_>jJN18T%_eU9Ydgu{vtm2Q*m;XfiP%BTeC$MeVN(??rt3D$caHaSF9d zKwei9U-EyQAL8LS_H|dz*|R7Eu`}+p98=Kmy95NbLynjfAvq76H451}^y<|KfM@rn zgl!DbaLv4!^7gGmSGA!8L(+0+{oGYjQsM=YrZt`K^kMsZ*T1!wr>3%rA3V4piR2X) z7UtE8J9^K5=)fO6>rovtWu~X6ciFsHbGo~XgZSe7NsuS3Q=PDH=dT%~(bfJ#swTyn zLf_v-%Q3U_o_PAy@G6y>LeZCM!$=GWX~hYC{`@&&@`C4wxw*7z_ck$QWv=aeWsdv! zNSPHOM_wNVP?x`8!YB|xWi#pyS;zY>5I+7kaR=~nUIjc z$j!yCUlSg?G$wQI-^I;+W_sF-Q5O}e1?m%t2ZBRFEFB!ub8>c{%grqT!P2;VWilVW zf=+8`X?gncB??-p2EI#=T$*%!c1|)mHMI#wk|&Moff5lF6-B@_C8wpGZuih3k*Yr| zuzK^Ma}yZk1x#G+*(odzxtenD(ut%b7Q31N&?2wyhx&q?YM!r!%8uyc zuo|8;&yf(ILG zbEkIiQGtmc`9lgxfZlG4A09InT-@kXem;+WIxneviCsif5Y`&X7x=9Gwef=l3dePQ zZCTdL{EZ;Is5+y(xN@b7j*~ViRNY}~Yg-$$st6kDG*HbIkl*o98z3MS7z00e@W29x zJM-d&38+x+9Pxbz4lu(jg>)U>x5kamb(pQOpGn*yP!RhN*rw)Plr&kxF-^9!t&JuA z$I$zr**7$zh`EW$_%~ynDr~z%z`fjeCT3=4Y1{7_!7$my#qr&pq-Y5n$oTG`J=U{6 zZ=Kq?@pSR}^z`(-{?yW-XbDd@T8>P>TVoc0ml$b($$(06wLB?xy%~FNG<2~9y^?ivgWfk=c}B{*6FA%m-FDSxA0+prtm z^D;;}bVoqtr!+v&f_txje5%j1ry<@t4R1%|n&awoi9}*5WxThx;XddaW|A~cGoW&? 
z1=Ev!tsI*p9zd09n#V;0eYcfkkJL8!fHpo;o9iP`V-pHWg|!Cry?GuNi>I@NL)jTX zeKL#_phZ?zR%7`&!5sMK_k~k$ zYjgQC^aH5k+XE^`tFeyh_L6mMH8u)(x-2x*8~rwGCxbgUYHe)XJm0g5rz--(;cu6p zu$PqNtg)GLr9B&OEk7rKplz$MiLAy7L8m(jLpP4%>0w@L?H%w}>d=jKJpJ(3q4IND zE$u=<(RYQ=%W^@|689-XzRzn-X$3EdP$akmBmy%x{#;#R6ua5`4Vql!Mdjsp9gF^^ z2Ws%u!`Nsj;l@7IFC@a&3#gx0Wmq4KyFdlthv-uTQ()163ZOn`f}&T)%xou+7wSD$ ztZ+b4q-How;ONn#LEneNo59@xtelE@d;BbQZT(KuMOI#>eqi_xITRU!GkQTtEHg~dIyyQo^1M~xbpWnHeGnJqeQ9*m?c>|JJs|260Rn4DW3Omd2LTs<>rFE= z3VJCkdMu_kPYQjvx*CX~ea*N11k+}D-(H!RpP$f>Muwe6qm^FCZVl3e(v24I?g1Y}sy)|R&dJZ83RVpkhfARl@^9Ewy1_-(ZQD+Y z8}S|qlhIuHuo-x!1u63E@(RXZgEz&jtoe4QW*Q(CUtGk&nc_tTi8AVxt5?N=7*E1@7dcvTXMu>j!0pg8FxbC; zzb;v@gd%(_6|^|{(_22M#<+x%xK1p6mZo+jxfw6d*Sv2nuX&dWf?;g7=LRtNvs`JL z#n))L5=I^NFs2Tu9x*kRDe147kni(vGlYX)|9_Yv!Au-sF>I$ZEbDY_>6sZ%u~&ArBk|wG!P{`p!p2V&BJGweO`jP?z&+k2NdN@ z=E~um(iJY9Y^GA9X5UDztgcGVY7-9QtKGK#TwnhTHB2f+XXl3)WYUePW&*V!E9(Rh zM`>+s|4iQm1soYtpJTY|SZfBT8X=@e$HcSMAR`)VU)~J;GA9;CHNrElKUz}tLrg72 zda`+jI#?6v9J4l4W>z1w)&K?oJFon4GP@5`N79^nv}Tbtop_s1NP~PlAfV!7zG?j7 zBS!?FE`X}0^vWw!$b{g@7$g<@lBo2cp~2GH`oP>oj=!u!EyCQ~{GqskE3DCno2yw3 zSRg-!d>op{9^15OlZCAu4ajB++Su5z$jQldDX7P_fSLuh;v5nU$7R4FWaZ%C5o$h6 z(mZEgv6@B2Izz=R<9Pqgi2QgAI#1>?6lHqg4i#}MXSr(%%J#(m_~yE@II{DMXkXOV zE`!fKHqI&FDN=`B<6Z?!;)(_jq=xYc{#Oz!Vrzt z4GIEprQpe`g;w-ba|OoK+}wanzVp&X|Jl;pkj|0Ck>*l&FSaS&BOfnt)HBLQQ3;8b zYfD**{sXtRa&mJ1_iiEj?jINCu8M%&y?==bKj{lmCfs zt#LEQhx&zwA>xJ4xp{c(7eq}#J}dYSDucg}>*=2i0xT(o|>K{kpItU_V5C|MX{qks>ED zsDof~;xw~jKaZk*{&@c@(X8zD#<`2?AqQsX<_d?s!K+|IfoKE|SRwL@2iPB1=yW!) zR%T{qs_-%qqbUa$flEq3k>!Ywd>puqII*@C^>a+*m&L@7TIc)ZZQxaxx?ks?I=#fy zkEa8^QO0+d-oMYEFXw!3qyE7IL7)j(5cqugNm*z>w3k-!Fbqu-7TqOUmNyjq|z zNO_NS?;>by79U8MmPx^Gr~-4OTrxk1vjtDydRRPY{K6EMscA0PjwuTSkqEzl$Y zJ-h;(7J$EfvxWra8@?e*1;MaFfKS_^7aEeFG>4UhLj#7-4pIT?)WiY5R!|E;FeCZz z%?wCZp@NLqYahGQ0ZkxMY3rNEq1FW}=HbSgFRU*>f=Ye2$pyu&fe%*?fMABIp?qp{ z+aN`v(R5N#qXKd5PhsHEh!sD_6FzTY0MP8JgL>j}^ITB>#Li~07R zRK(H2;^IzgZEdB03XL#@sy7jI$OyopzQ6yxNSq^NC1WmQolBr@-Rt-bTu>L)Lc;So zA8;yLmO8cMes8{wm0QE!ntoC}V5Ydfdk{&19UcXS%3`m5U<#$gr-)w@netuj+x;;pdLBjKx{+OdZa0uW*APf{{ za2?khGuHi;s2zGJ@RaVc@$($E#@vXe z=4OzFV~Z16OCG;{no~i4NUR*oblJWp;E<$bHiHAfpw!M3J_ao2Tf{Mu69OEGLo9yz zqT}dz(J$YCa)U}y z|MqaEnTt3GLA?x;KRLHhalLyc!p{h7GR0SSeQmlFraAspoDzB)_hX*XKuSsqJf(BDZYjPfQr{E)mc(LRZYQC!b6ex; zio0JunJ}i!|Hdq&O z5Dtc3m6?mcerHi4zGyTMiA`Xz7A4FIxVrzIzW~F60?g*w%*+l)KjN=nzt%I@zhg8e z`N|-sp$ps;#`%EfrAX$74<9n>2ylU_T^?_`dhOcilaxKOFRd&r5Mcal-M&{8tfR+| zA5XugZa-jh{siEysGM9UHIh<^>4V=Bg))8%@U(%X=>SF4nmwGI0JnmNqYDSaupM@Q zR%M(P^D1nDl8wPmU}YqkYp!m&aNz>O5h9Tkz+JZh1FxWfAJLb|<^*5CLIUNmAGilv zt&E>FXE?PF0KotMj!{=7h;wQ?rr`omD@a$t=x-mjF__(up9$XG*G6W_iuIu&TKhJ? zT}w-gJlDH}p}=8Fb8>MdfCX0yx(ZaV0LaQ$Am5=B9b -- bash + +In order to replicate the container startup scripts execute this command: + +For Redis: + + /opt/bitnami/scripts/redis/entrypoint.sh /opt/bitnami/scripts/redis/run.sh + +{{- if .Values.sentinel.enabled }} + +For Redis Sentinel: + + /opt/bitnami/scripts/redis-sentinel/entrypoint.sh /opt/bitnami/scripts/redis-sentinel/run.sh + +{{- end }} +{{- else }} + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.auth.enabled }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "auth.enabled=false" you have + most likely exposed the Redis™ service externally without any authentication + mechanism. 
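+{{- /*
+Everything below renders only for regular (non-diagnostic) deployments. The
+WARNING block that follows fires when the master service is a LoadBalancer
+while auth is disabled and NetworkPolicy is not enforced, i.e. when Redis™
+would be reachable from outside the cluster without a password.
+*/}}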
+
+{{- if contains .Values.master.service.type "LoadBalancer" }}
+{{- if not .Values.auth.enabled }}
+{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }}
+
+-------------------------------------------------------------------------------
+ WARNING
+
+    By specifying "master.service.type=LoadBalancer" and "auth.enabled=false",
+    you have most likely exposed the Redis™ service externally without any
+    authentication mechanism.
+
+    For security reasons, we strongly suggest that you switch to "ClusterIP" or
+    "NodePort". As an alternative, you can also set "auth.enabled=true" and
+    provide a valid password via the "password" parameter.
+
+-------------------------------------------------------------------------------
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{- if eq .Values.architecture "replication" }}
+{{- if .Values.sentinel.enabled }}
+
+Redis™ can be accessed via port {{ .Values.sentinel.service.ports.redis }} on the following DNS name from within your cluster:
+
+    {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read-only operations
+
+For read/write operations, first access the Redis™ Sentinel cluster, which is available on port {{ .Values.sentinel.service.ports.sentinel }} at the same DNS name as above.
+
+{{- else }}
+
+Redis™ can be accessed on the following DNS names from within your cluster:
+
+    {{ printf "%s-master.%s.svc.%s" (include "common.names.fullname" .) .Release.Namespace .Values.clusterDomain }} for read/write operations (port {{ .Values.master.service.ports.redis }})
+    {{ printf "%s-replicas.%s.svc.%s" (include "common.names.fullname" .) .Release.Namespace .Values.clusterDomain }} for read-only operations (port {{ .Values.replica.service.ports.redis }})
+
+{{- end }}
+{{- else }}
+
+Redis™ can be accessed via port {{ .Values.master.service.ports.redis }} on the following DNS name from within your cluster:
+
+    {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+
+{{- end }}
+
+{{ if .Values.auth.enabled }}
+
+To get your password run:
+
+    export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode)
+
+{{- end }}
+
+To connect to your Redis™ server:
+
+1. Run a Redis™ pod that you can use as a client:
+
+   kubectl run --namespace {{ .Release.Namespace }} redis-client --restart='Never' {{ if .Values.auth.enabled }} --env REDIS_PASSWORD=$REDIS_PASSWORD {{ end }}{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} --labels="{{ template "common.names.fullname" . }}-client=true" {{ end }} --image {{ template "redis.image" . }} --command -- sleep infinity
+
+{{- if .Values.tls.enabled }}
+
+   Copy your TLS certificates to the pod:
+
+   kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.cert redis-client:/tmp/client.cert
+   kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.key redis-client:/tmp/client.key
+   kubectl cp --namespace {{ .Release.Namespace }} /path/to/CA.cert redis-client:/tmp/CA.cert
+
+{{- end }}
+
+   Use the following command to attach to the pod:
+
+   kubectl exec --tty -i redis-client --namespace {{ .Release.Namespace }} -- bash
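+{{- /*
+The redis-cli invocations below assume the client pod created in step 1, which
+already carries REDIS_PASSWORD in its environment (and, when NetworkPolicy is
+enforced, the "-client=true" label applied at creation time). REDISCLI_AUTH is
+redis-cli's documented way of supplying a password non-interactively.
+*/}}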
+
+2. Connect using the Redis™ CLI:
+
+{{- if eq .Values.architecture "replication" }}
+   {{- if .Values.sentinel.enabled }}
+   {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ template "common.names.fullname" . }} -p {{ .Values.sentinel.service.ports.redis }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Read-only operations
+   {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ template "common.names.fullname" . }} -p {{ .Values.sentinel.service.ports.sentinel }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Sentinel access
+   {{- else }}
+   {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ printf "%s-master" (include "common.names.fullname" .) }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+   {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ printf "%s-replicas" (include "common.names.fullname" .) }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+   {{- end }}
+{{- else }}
+   {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ template "common.names.fullname" . }}-master{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+{{- end }}
+
+{{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
+
+Note: Since NetworkPolicy is enabled, only pods with the label "{{ template "common.names.fullname" . }}-client=true" will be able to connect to Redis™.
+
+{{- else }}
+
+To connect to your database from outside the cluster, execute the following commands:
+
+{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }}
+{{- if contains "NodePort" .Values.sentinel.service.type }}
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }})
+    {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+
+{{- else if contains "LoadBalancer" .Values.sentinel.service.type }}
+
+    NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+          Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "common.names.fullname" . }}'
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+    {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $SERVICE_IP -p {{ .Values.sentinel.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+
+{{- else if contains "ClusterIP" .Values.sentinel.service.type }}
+
+    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "common.names.fullname" . }} {{ .Values.sentinel.service.ports.redis }}:{{ .Values.sentinel.service.ports.redis }} &
+    {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h 127.0.0.1 -p {{ .Values.sentinel.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+
+{{- end }}
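+{{- /*
+The branches above cover Sentinel-fronted access from outside the cluster; the
+ones below are the equivalents for a plain master service. In both cases the
+NodePort example reads the first address reported for the first node, which
+may be an internal IP depending on the provider.
+*/}}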
+{{- else }}
+{{- if contains "NodePort" .Values.master.service.type }}
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ printf "%s-master" (include "common.names.fullname" .) }})
+    {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+
+{{- else if contains "LoadBalancer" .Values.master.service.type }}
+
+    NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+          Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "common.names.fullname" . }}'
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ printf "%s-master" (include "common.names.fullname" .) }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+    {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $SERVICE_IP -p {{ .Values.master.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+
+{{- else if contains "ClusterIP" .Values.master.service.type }}
+
+    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ printf "%s-master" (include "common.names.fullname" .) }} {{ .Values.master.service.ports.redis }}:{{ .Values.master.service.ports.redis }} &
+    {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h 127.0.0.1 -p {{ .Values.master.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }}
+
+{{- end }}
+{{- end }}
+
+{{- end }}
+{{- end }}
+{{- include "redis.checkRollingTags" . }}
+{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
+{{- include "common.warnings.rollingTag" .Values.sysctl.image }}
+{{- include "redis.validateValues" . }}
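+{{- /*
+The block below only renders on a fresh install (not .Release.IsUpgrade): with
+Sentinel behind a NodePort service, node ports are allocated at install time,
+so a follow-up upgrade is required unless they were pinned via
+sentinel.service.nodePorts.sentinel.
+*/}}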
+
+{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (not .Release.IsUpgrade ) }}
+{{- if $.Values.sentinel.service.nodePorts.sentinel }}
+No need to upgrade: ports and nodePorts have been set from values.
+{{- else }}
+#!#!#!#!#!#!#!# IMPORTANT #!#!#!#!#!#!#!#
+You need to perform an upgrade for the services and workloads to be created.
+{{- end }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/redis/templates/_helpers.tpl b/ansible/roles/helm_install/files/redis/templates/_helpers.tpl
new file mode 100644
index 0000000..a9c0d03
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/templates/_helpers.tpl
@@ -0,0 +1,291 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the proper Redis image name
+*/}}
+{{- define "redis.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Redis Sentinel image name
+*/}}
+{{- define "redis.sentinel.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.sentinel.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the metrics image)
+*/}}
+{{- define "redis.metrics.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "redis.volumePermissions.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the sysctl image
+*/}}
+{{- define "redis.sysctl.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.sysctl.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "redis.imagePullSecrets" -}}
+{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.sentinel.image .Values.metrics.image .Values.volumePermissions.image .Values.sysctl.image) "global" .Values.global) -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "networkPolicy.apiVersion" -}}
+{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiGroup for PodSecurityPolicy.
+*/}}
+{{- define "podSecurityPolicy.apiGroup" -}}
+{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "policy" -}}
+{{- else -}}
+{{- print "extensions" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS secret object should be created
+*/}}
+{{- define "redis.createTlsSecret" -}}
+{{- if and .Values.tls.enabled .Values.tls.autoGenerated (and (not .Values.tls.existingSecret) (not .Values.tls.certificatesSecret)) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the secret containing Redis TLS certificates
+*/}}
+{{- define "redis.tlsSecretName" -}}
+{{- $secretName := coalesce .Values.tls.existingSecret .Values.tls.certificatesSecret -}}
+{{- if $secretName -}}
+    {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+    {{- printf "%s-crt" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
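+
+{{/*
+The three path helpers below resolve certificate locations under
+/opt/bitnami/redis/certs. When the chart auto-generates the TLS secret, the
+standard kubernetes.io/tls keys (tls.crt, tls.key, ca.crt) are used; otherwise
+the filenames must be supplied via tls.certFilename, tls.certKeyFilename and
+tls.certCAFilename.
+*/}}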
+
+{{/*
+Return the path to the cert file.
+*/}}
+{{- define "redis.tlsCert" -}}
+{{- if (include "redis.createTlsSecret" . ) -}}
+    {{- printf "/opt/bitnami/redis/certs/%s" "tls.crt" -}}
+{{- else -}}
+    {{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/redis/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert key file.
+*/}}
+{{- define "redis.tlsCertKey" -}}
+{{- if (include "redis.createTlsSecret" . ) -}}
+    {{- printf "/opt/bitnami/redis/certs/%s" "tls.key" -}}
+{{- else -}}
+    {{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/redis/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CA cert file.
+*/}}
+{{- define "redis.tlsCACert" -}}
+{{- if (include "redis.createTlsSecret" . ) -}}
+    {{- printf "/opt/bitnami/redis/certs/%s" "ca.crt" -}}
+{{- else -}}
+    {{- required "Certificate CA filename is required when TLS is enabled" .Values.tls.certCAFilename | printf "/opt/bitnami/redis/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the DH params file.
+*/}}
+{{- define "redis.tlsDHParams" -}}
+{{- if .Values.tls.dhParamsFilename -}}
+{{- printf "/opt/bitnami/redis/certs/%s" .Values.tls.dhParamsFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "redis.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+    {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the configuration configmap name
+*/}}
+{{- define "redis.configmapName" -}}
+{{- if .Values.existingConfigmap -}}
+    {{- printf "%s" (tpl .Values.existingConfigmap $) -}}
+{{- else -}}
+    {{- printf "%s-configuration" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created
+*/}}
+{{- define "redis.createConfigmap" -}}
+{{- if empty .Values.existingConfigmap }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the password secret.
+*/}}
+{{- define "redis.secretName" -}}
+{{- if .Values.auth.existingSecret -}}
+{{- printf "%s" .Values.auth.existingSecret -}}
+{{- else -}}
+{{- printf "%s" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the password key to be retrieved from the Redis™ secret.
+*/}}
+{{- define "redis.secretPasswordKey" -}}
+{{- if and .Values.auth.existingSecret .Values.auth.existingSecretPasswordKey -}}
+{{- printf "%s" .Values.auth.existingSecretPasswordKey -}}
+{{- else -}}
+{{- printf "redis-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the value for a given key in an existing secret (if it exists);
+otherwise, generate a random value.
+*/}}
+{{- define "getValueFromSecret" }}
+    {{- $len := (default 16 .Length) | int -}}
+    {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}}
+    {{- if $obj }}
+        {{- index $obj .Key | b64dec -}}
+    {{- else -}}
+        {{- randAlphaNum $len -}}
+    {{- end -}}
+{{- end }}
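+
+{{/*
+Illustrative call (the secret name below is an example only):
+  include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" "my-release" "Length" 10 "Key" "redis-password")
+Note: "lookup" returns an empty result during "helm template" and
+"helm install --dry-run", so offline rendering always falls through to the
+random value.
+*/}}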
"Length" 10 "Key" "redis-password") -}} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.sentinel.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.image }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "redis.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "redis.validateValues.topologySpreadConstraints" .) -}} +{{- $messages := append $messages (include "redis.validateValues.architecture" .) -}} +{{- $messages := append $messages (include "redis.validateValues.podSecurityPolicy.create" .) -}} +{{- $messages := append $messages (include "redis.validateValues.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis™ - spreadConstrainsts K8s version */}} +{{- define "redis.validateValues.topologySpreadConstraints" -}} +{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.replica.topologySpreadConstraints -}} +redis: topologySpreadConstraints + Pod Topology Spread Constraints are only available on K8s >= 1.16 + Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis™ - must provide a valid architecture */}} +{{- define "redis.validateValues.architecture" -}} +{{- if and (ne .Values.architecture "standalone") (ne .Values.architecture "replication") -}} +redis: architecture + Invalid architecture selected. Valid values are "standalone" and + "replication". Please set a valid architecture (--set architecture="xxxx") +{{- end -}} +{{- if and .Values.sentinel.enabled (not (eq .Values.architecture "replication")) }} +redis: architecture + Using redis sentinel on standalone mode is not supported. + To deploy redis sentinel, please select the "replication" mode + (--set "architecture=replication,sentinel.enabled=true") +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis™ - PodSecurityPolicy create */}} +{{- define "redis.validateValues.podSecurityPolicy.create" -}} +{{- if and .Values.podSecurityPolicy.create (not .Values.podSecurityPolicy.enabled) }} +redis: podSecurityPolicy.create + In order to create PodSecurityPolicy, you also need to enable + podSecurityPolicy.enabled field +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis™ - TLS enabled */}} +{{- define "redis.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.tls.autoGenerated) (not .Values.tls.existingSecret) (not .Values.tls.certificatesSecret) }} +redis: tls.enabled + In order to enable TLS, you also need to provide + an existing secret containing the TLS certificates or + enable auto-generated certificates. +{{- end -}} +{{- end -}} + +{{/* Define the suffix utilized for external-dns */}} +{{- define "redis.externalDNS.suffix" -}} +{{ printf "%s.%s" (include "common.names.fullname" .) .Values.useExternalDNS.suffix }} +{{- end -}} + +{{/* Compile all annotations utilized for external-dns */}} +{{- define "redis.externalDNS.annotations" -}} +{{- if .Values.useExternalDNS.enabled }} +{{ .Values.useExternalDNS.annotationKey }}hostname: {{ include "redis.externalDNS.suffix" . 
}} +{{- range $key, $val := .Values.useExternalDNS.additionalAnnotations }} +{{ $.Values.useExternalDNS.annotationKey }}{{ $key }}: {{ $val | quote }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/configmap.yaml b/ansible/roles/helm_install/files/redis/templates/configmap.yaml new file mode 100644 index 0000000..71a710f --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/configmap.yaml @@ -0,0 +1,60 @@ +{{- if (include "redis.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + redis.conf: |- + # User-supplied common configuration: + {{- if .Values.commonConfiguration }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonConfiguration "context" $ ) | nindent 4 }} + {{- end }} + # End of common configuration + master.conf: |- + dir {{ .Values.master.persistence.path }} + # User-supplied master configuration: + {{- if .Values.master.configuration }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.configuration "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.master.disableCommands }} + {{- range .Values.master.disableCommands }} + rename-command {{ . }} "" + {{- end }} + {{- end }} + # End of master configuration + replica.conf: |- + dir {{ .Values.replica.persistence.path }} + slave-read-only yes + # User-supplied replica configuration: + {{- if .Values.replica.configuration }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.configuration "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.replica.disableCommands }} + {{- range .Values.replica.disableCommands }} + rename-command {{ . }} "" + {{- end }} + {{- end }} + # End of replica configuration + {{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + port {{ .Values.sentinel.containerPorts.sentinel }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "common.names.fullname" . }}-node-0.{{ template "common.names.fullname" . 
}}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.sentinel.service.ports.redis }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} + # User-supplied sentinel configuration: + {{- if .Values.sentinel.configuration }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.configuration "context" $ ) | nindent 4 }} + {{- end }} + # End of sentinel configuration + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/extra-list.yaml b/ansible/roles/helm_install/files/redis/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/headless-svc.yaml b/ansible/roles/helm_install/files/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..d798a0b --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/headless-svc.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- include "redis.externalDNS.annotations" . | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + {{- if .Values.sentinel.enabled }} + publishNotReadyAddresses: true + {{- end }} + ports: + - name: tcp-redis + port: {{ if .Values.sentinel.enabled }}{{ .Values.sentinel.service.ports.redis }}{{ else }}{{ .Values.master.service.ports.redis }}{{ end }} + targetPort: redis + {{- if .Values.sentinel.enabled }} + - name: tcp-sentinel + port: {{ .Values.sentinel.service.ports.sentinel }} + targetPort: redis-sentinel + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} diff --git a/ansible/roles/helm_install/files/redis/templates/health-configmap.yaml b/ansible/roles/helm_install/files/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..41f3145 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/health-configmap.yaml @@ -0,0 +1,192 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-health" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + ping_readiness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}') + if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ] && [ "$responseFirstWord" != "MASTERDOWN" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- + #!/bin/bash + +{{- if .Values.auth.sentinel }} + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" +{{- end }} + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_SENTINEL_TLS_PORT_NUMBER \ + --tls \ + --cacert "$REDIS_SENTINEL_TLS_CA_FILE" \ + {{- if .Values.tls.authClients }} + --cert "$REDIS_SENTINEL_TLS_CERT_FILE" \ + --key "$REDIS_SENTINEL_TLS_KEY_FILE" \ + {{- end }} +{{- else }} + -p $REDIS_SENTINEL_PORT \ +{{- end }} + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . 
}} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}') + if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/ansible/roles/helm_install/files/redis/templates/master/application.yaml b/ansible/roles/helm_install/files/redis/templates/master/application.yaml new file mode 100644 index 0000000..041ce25 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/master/application.yaml @@ -0,0 +1,467 @@ +{{- if or (not (eq .Values.architecture "replication")) (not .Values.sentinel.enabled) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: {{ .Values.master.kind }} +metadata: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: master + {{- if (eq .Values.master.kind "StatefulSet") }} + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }} + {{- end }} + {{- if .Values.master.updateStrategy }} + {{- if (eq .Values.master.kind "Deployment") }} + strategy: {{- toYaml .Values.master.updateStrategy | nindent 4 }} + {{- else }} + updateStrategy: {{- toYaml .Values.master.updateStrategy | nindent 4 }} + {{- end }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: master + {{- if .Values.master.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.podLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "redis.createConfigmap" .) }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . | nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.podSecurityContext.enabled }} + securityContext: {{- omit .Values.master.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.master.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAffinityPreset "component" "master" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAntiAffinityPreset "component" "master" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.master.nodeAffinityPreset.type "key" .Values.master.nodeAffinityPreset.key "values" .Values.master.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.master.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.master.shareProcessNamespace }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName | quote }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.master.terminationGracePeriodSeconds }} + containers: + - name: redis + image: {{ template "redis.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.master.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.master.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.master.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.master.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.master.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.master.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.master.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: REDIS_REPLICATION_MODE + value: master + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + {{- if .Values.auth.enabled }} + {{- if .Values.auth.usePasswordFiles }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.master.containerPorts.redis | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.master.containerPorts.redis | quote }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.master.extraEnvVarsCM }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.master.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.master.containerPorts.redis }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.master.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.master.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: redis + {{- else if .Values.master.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + # One second longer than command timeout should prevent generation of zombie processes. + timeoutSeconds: {{ add1 .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- else if .Values.master.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.readinessProbe.timeoutSeconds }} + {{- else if .Values.master.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.master.resources }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + - name: tmp + mountPath: /tmp + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if 
.Values.master.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "common.names.fullname" . }} + {{- if .Values.auth.enabled }} + - name: REDIS_USER + value: default + {{- if (not .Values.auth.usePasswordFiles) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.master.containerPorts.redis }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . }} + {{- end }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: metrics + containerPort: 9121 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.master.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.master.persistence.enabled .Values.master.podSecurityContext.enabled .Values.master.containerSecurityContext.enabled }} + {{- if or .Values.master.initContainers $needsVolumePermissions .Values.sysctl.enabled }} + initContainers: + {{- if .Values.master.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ include "redis.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.master.persistence.path }} + {{- else }} + chown -R {{ .Values.master.containerSecurityContext.runAsUser }}:{{ .Values.master.podSecurityContext.fsGroup }} {{ .Values.master.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctl.enabled }} + - name: init-sysctl + image: {{ include "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctl.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.resources }} + resources: {{- toYaml .Values.sysctl.resources | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + - name: health + configMap: + name: {{ printf "%s-health" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ include "redis.configmapName" . }} + {{- if .Values.sysctl.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + {{- if .Values.master.persistence.medium }} + emptyDir: + medium: {{ .Values.master.persistence.medium | quote }} + {{- if .Values.master.persistence.sizeLimit }} + sizeLimit: {{ .Values.master.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + - name: tmp + {{- if .Values.master.persistence.medium }} + emptyDir: + medium: {{ .Values.master.persistence.medium | quote }} + {{- if .Values.master.persistence.sizeLimit }} + sizeLimit: {{ .Values.master.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ include "redis.tlsSecretName" . 
}} + defaultMode: 256 + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if not .Values.master.persistence.enabled }} + - name: redis-data + {{- if .Values.master.persistence.medium }} + emptyDir: { + medium: {{ .Values.master.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- else if .Values.master.persistence.existingClaim }} + - name: redis-data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.master.persistence.existingClaim .) }} + {{- else if (eq .Values.master.kind "Deployment") }} + - name: redis-data + persistentVolumeClaim: + claimName: {{ printf "redis-data-%s-master" (include "common.names.fullname" .) }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: {{- include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: master + {{- if .Values.master.persistence.annotations }} + annotations: {{- toYaml .Values.master.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{- if .Values.master.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.master.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.master.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/master/psp.yaml b/ansible/roles/helm_install/files/redis/templates/master/psp.yaml new file mode 100644 index 0000000..2ba93b6 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/master/psp.yaml @@ -0,0 +1,46 @@ +{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- if and $pspAvailable .Values.podSecurityPolicy.create }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.master.podSecurityContext.fsGroup }} + max: {{ .Values.master.podSecurityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.master.containerSecurityContext.runAsUser }} + max: {{ .Values.master.containerSecurityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.master.containerSecurityContext.runAsUser }} + max: {{ .Values.master.containerSecurityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/master/pvc.yaml b/ansible/roles/helm_install/files/redis/templates/master/pvc.yaml new file mode 100644 index 0000000..2f31036 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/master/pvc.yaml @@ -0,0 +1,26 @@ +{{- if and (eq .Values.architecture "standalone") (eq .Values.master.kind "Deployment") (.Values.master.persistence.enabled) (not .Values.master.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ printf "redis-data-%s-master" (include "common.names.fullname" .) }} + labels: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.master.persistence.annotations }} + annotations: {{- toYaml .Values.master.persistence.annotations | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{- if .Values.master.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.master.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.dataSource "context" $) | nindent 4 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.master.persistence "global" .Values.global) | nindent 4 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/master/service.yaml b/ansible/roles/helm_install/files/redis/templates/master/service.yaml new file mode 100644 index 0000000..37ac3d1 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/master/service.yaml @@ -0,0 +1,49 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.master.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.master.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.master.service.type }} + {{ if eq .Values.master.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.master.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.master.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if and (eq .Values.master.service.type "ClusterIP") .Values.master.service.clusterIP }} + clusterIP: {{ .Values.master.service.clusterIP }} + {{- end }} + ports: + - name: tcp-redis + port: {{ .Values.master.service.ports.redis }} + targetPort: redis + {{- if and (or (eq .Values.master.service.type "NodePort") (eq .Values.master.service.type "LoadBalancer")) .Values.master.service.nodePorts.redis}} + nodePort: {{ .Values.master.service.nodePorts.redis}} + {{- else if eq .Values.master.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.master.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: master +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/metrics-svc.yaml b/ansible/roles/helm_install/files/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..94459ec --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/metrics-svc.yaml @@ -0,0 +1,38 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if eq .Values.metrics.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.metrics.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.metrics.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.port }} + protocol: TCP + targetPort: metrics + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/networkpolicy.yaml b/ansible/roles/helm_install/files/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..64c0505 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/networkpolicy.yaml @@ -0,0 +1,78 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Ingress + {{- if or (eq .Values.architecture "replication") .Values.networkPolicy.extraEgress }} + - Egress + egress: + {{- if eq .Values.architecture "replication" }} + # Allow DNS resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.master.containerPorts.redis }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.containerPorts.sentinel }} + {{- end }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" .
| nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.master.containerPorts.redis }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.containerPorts.sentinel }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow Prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} + {{- if .Values.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/pdb.yaml b/ansible/roles/helm_install/files/redis/templates/pdb.yaml new file mode 100644 index 0000000..f82d278 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/pdb.yaml @@ -0,0 +1,23 @@ +{{- if .Values.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/prometheusrule.yaml b/ansible/roles/helm_install/files/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..cd8bc68 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/prometheusrule.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "common.names.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" .
| nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "common.names.name" $ }} + rules: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/replicas/hpa.yaml b/ansible/roles/helm_install/files/redis/templates/replicas/hpa.yaml new file mode 100644 index 0000000..468a504 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/replicas/hpa.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.replica.autoscaling.enabled (not .Values.sentinel.enabled) }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: StatefulSet + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + minReplicas: {{ .Values.replica.autoscaling.minReplicas }} + maxReplicas: {{ .Values.replica.autoscaling.maxReplicas }} + metrics: + {{- if .Values.replica.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.replica.autoscaling.targetCPU }} + {{- end }} + {{- if .Values.replica.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.replica.autoscaling.targetMemory }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/replicas/service.yaml b/ansible/roles/helm_install/files/redis/templates/replicas/service.yaml new file mode 100644 index 0000000..95cd756 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/replicas/service.yaml @@ -0,0 +1,49 @@ +{{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.replica.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.replica.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.replica.service.type }} + {{- if eq .Values.replica.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.replica.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.replica.service.type "LoadBalancer") .Values.replica.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.replica.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.replica.service.type "LoadBalancer") .Values.replica.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.replica.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if and (eq .Values.replica.service.type "ClusterIP") .Values.replica.service.clusterIP }} + clusterIP: {{ .Values.replica.service.clusterIP }} + {{- end }} + ports: + - name: tcp-redis + port: {{ .Values.replica.service.ports.redis }} + targetPort: redis + {{- if and (or (eq .Values.replica.service.type "NodePort") (eq .Values.replica.service.type "LoadBalancer")) .Values.replica.service.nodePorts.redis}} + nodePort: {{ .Values.replica.service.nodePorts.redis}} + {{- else if eq .Values.replica.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.replica.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: replica +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/replicas/statefulset.yaml b/ansible/roles/helm_install/files/redis/templates/replicas/statefulset.yaml new file mode 100644 index 0000000..14a1924 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/replicas/statefulset.yaml @@ -0,0 +1,455 @@ +{{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.replica.autoscaling.enabled }} + replicas: {{ .Values.replica.replicaCount }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: replica + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) 
}} + {{- if .Values.replica.updateStrategy }} + updateStrategy: {{- toYaml .Values.replica.updateStrategy | nindent 4 }} + {{- end }} + {{- if .Values.replica.podManagementPolicy }} + podManagementPolicy: {{ .Values.replica.podManagementPolicy | quote }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: replica + {{- if .Values.replica.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "redis.createConfigmap" .) }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.replica.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . | nindent 6 }} + {{- if .Values.replica.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.replica.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.podSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . 
}} + {{- if .Values.replica.priorityClassName }} + priorityClassName: {{ .Values.replica.priorityClassName | quote }} + {{- end }} + {{- if .Values.replica.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.replica.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAffinityPreset "component" "replica" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAntiAffinityPreset "component" "replica" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.replica.nodeAffinityPreset.type "key" .Values.replica.nodeAffinityPreset.key "values" .Values.replica.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.replica.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.replica.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.replica.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.replica.shareProcessNamespace }} + {{- end }} + {{- if .Values.replica.schedulerName }} + schedulerName: {{ .Values.replica.schedulerName | quote }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.replica.terminationGracePeriodSeconds }} + containers: + - name: redis + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.replica.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.replica.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.replica.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.replica.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.replica.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-replica.sh + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "common.names.fullname" . }}-master-0.{{ template "common.names.fullname" . 
}}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.master.containerPorts.redis | quote }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + {{- if .Values.auth.enabled }} + {{- if .Values.auth.usePasswordFiles }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.replica.containerPorts.redis | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.replica.containerPorts.redis | quote }} + {{- end }} + {{- if .Values.replica.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.replica.extraEnvVarsCM .Values.replica.extraEnvVarsSecret }} + envFrom: + {{- if .Values.replica.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.replica.extraEnvVarsCM }} + {{- end }} + {{- if .Values.replica.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.replica.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.replica.containerPorts.redis }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.replica.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: redis + {{- else if .Values.replica.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.replica.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.replica.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.replica.livenessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.replica.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.replica.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.replica.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh {{ .Values.replica.livenessProbe.timeoutSeconds }} + {{- else if .Values.replica.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.replica.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ 
.Values.replica.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.replica.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.replica.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.replica.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.replica.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh {{ .Values.replica.readinessProbe.timeoutSeconds }} + {{- else if .Values.replica.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.resources }} + resources: {{- toYaml .Values.replica.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + subPath: {{ .Values.replica.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.replica.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "common.names.fullname" . }} + {{- if .Values.auth.enabled }} + - name: REDIS_USER + value: default + {{- if (not .Values.auth.usePasswordFiles) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.replica.containerPorts.redis }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + ports: + - name: metrics + containerPort: 9121 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.replica.persistence.enabled .Values.replica.podSecurityContext.enabled .Values.replica.containerSecurityContext.enabled }} + {{- if or .Values.replica.initContainers $needsVolumePermissions .Values.sysctl.enabled }} + initContainers: + {{- if .Values.replica.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ include "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.replica.persistence.path }} + {{- else }} + chown -R {{ .Values.replica.containerSecurityContext.runAsUser }}:{{ .Values.replica.podSecurityContext.fsGroup }} {{ .Values.replica.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.replica.persistence.path }} + subPath: {{ .Values.replica.persistence.subPath }} + {{- end }} + {{- if .Values.sysctl.enabled }} + - name: init-sysctl + image: {{ include "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctl.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.resources }} + resources: {{- toYaml .Values.sysctl.resources | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + - name: health + configMap: + name: {{ printf "%s-health" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . 
}} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ include "redis.configmapName" . }} + {{- if .Values.sysctl.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + {{- if .Values.replica.persistence.medium }} + emptyDir: { + medium: {{ .Values.replica.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ include "redis.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.replica.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if not .Values.replica.persistence.enabled }} + - name: redis-data + {{- if .Values.replica.persistence.medium }} + emptyDir: { + medium: {{ .Values.replica.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: {{- include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: replica + {{- if .Values.replica.persistence.annotations }} + annotations: {{- toYaml .Values.replica.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.replica.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.replica.persistence.size | quote }} + {{- if .Values.replica.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.replica.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.replica.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.replica.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/role.yaml b/ansible/roles/helm_install/files/redis/templates/role.yaml new file mode 100644 index 0000000..0475e0d --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/role.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} + {{- if and $pspAvailable .Values.podSecurityPolicy.enabled }} + - apiGroups: + - '{{ template "podSecurityPolicy.apiGroup" . }}' + resources: + - 'podsecuritypolicies' + verbs: + - 'use' + resourceNames: [{{ template "common.names.fullname" . 
}}] + {{- end }} + {{- if .Values.rbac.rules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/rolebinding.yaml b/ansible/roles/helm_install/files/redis/templates/rolebinding.yaml new file mode 100644 index 0000000..74968b8 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "common.names.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/scripts-configmap.yaml b/ansible/roles/helm_install/files/redis/templates/scripts-configmap.yaml new file mode 100644 index 0000000..2123d6a --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/scripts-configmap.yaml @@ -0,0 +1,625 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} + start-node.sh: | + #!/bin/bash + + . /opt/bitnami/scripts/libos.sh + . /opt/bitnami/scripts/liblog.sh + . /opt/bitnami/scripts/libvalidations.sh + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo {{ .Values.sentinel.containerPorts.sentinel }} + ;; + "REDIS") + echo {{ .Values.master.containerPorts.redis }} + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + + {{- if .Values.useExternalDNS.enabled }} + echo "${hostname}.{{- include "redis.externalDNS.suffix" . }}" + {{- else if eq .Values.sentinel.service.type "NodePort" }} + echo "${hostname}.{{- .Release.Namespace }}" + {{- else }} + echo "${hostname}.${HEADLESS_SERVICE}" + {{- end }} + } + + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + + HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + if [ -n "$REDIS_EXTERNAL_MASTER_HOST" ]; then + REDIS_SERVICE="$REDIS_EXTERNAL_MASTER_HOST" + else + REDIS_SERVICE="{{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + fi + + SENTINEL_SERVICE_PORT=$(get_port "{{ include "common.names.fullname" . 
}}" "TCP_SENTINEL") + validate_quorum() { + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + quorum_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel master {{ .Values.sentinel.masterSet }}" + else + quorum_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel master {{ .Values.sentinel.masterSet }}" + fi + info "about to run the command: $quorum_info_command" + eval $quorum_info_command | grep -Fq "s_down" + } + + trigger_manual_failover() { + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + failover_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel failover {{ .Values.sentinel.masterSet }}" + else + failover_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel failover {{ .Values.sentinel.masterSet }}" + fi + + info "about to run the command: $failover_command" + eval $failover_command + } + + get_sentinel_master_info() { + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + info "about to run the command: $sentinel_info_command" + eval $sentinel_info_command + } + + {{- if and .Values.replica.containerSecurityContext.runAsUser (eq (.Values.replica.containerSecurityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.replica.persistence.path }} + {{- end }} + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + + # check if there is a master + master_in_persisted_conf="$(get_full_hostname "$HOSTNAME")" + master_port_in_persisted_conf="$REDIS_MASTER_PORT_NUMBER" + master_in_sentinel="$(get_sentinel_master_info)" + redisRetVal=$? 
+ + {{- if .Values.sentinel.persistence.enabled }} + if [[ -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then + master_in_persisted_conf="$(awk '/monitor/ {print $4}' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + master_port_in_persisted_conf="$(awk '/monitor/ {print $5}' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + info "Found previous master ${master_in_persisted_conf}:${master_port_in_persisted_conf} in /opt/bitnami/redis-sentinel/etc/sentinel.conf" + debug "$(cat /opt/bitnami/redis-sentinel/etc/sentinel.conf | grep monitor)" + touch /opt/bitnami/redis-sentinel/etc/.node_read + fi + {{- end }} + + if [[ $redisRetVal -ne 0 ]]; then + if [[ "$master_in_persisted_conf" == "$(get_full_hostname "$HOSTNAME")" ]]; then + # Case 1: No active sentinel and in previous sentinel.conf we were the master --> MASTER + info "Configuring the node as master" + export REDIS_REPLICATION_MODE="master" + else + # Case 2: No active sentinel and in previous sentinel.conf we were not master --> REPLICA + info "Configuring the node as replica" + export REDIS_REPLICATION_MODE="slave" + REDIS_MASTER_HOST=${master_in_persisted_conf} + REDIS_MASTER_PORT_NUMBER=${master_port_in_persisted_conf} + fi + else + # Fetches current master's host and port + REDIS_SENTINEL_INFO=($(get_sentinel_master_info)) + info "Current master: REDIS_SENTINEL_INFO=(${REDIS_SENTINEL_INFO[0]},${REDIS_SENTINEL_INFO[1]})" + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + if [[ "$REDIS_MASTER_HOST" == "$(get_full_hostname "$HOSTNAME")" ]]; then + # Case 3: Active sentinel and master it is this node --> MASTER + info "Configuring the node as master" + export REDIS_REPLICATION_MODE="master" + else + # Case 4: Active sentinel and master is not this node --> REPLICA + info "Configuring the node as replica" + export REDIS_REPLICATION_MODE="slave" + + {{- if and .Values.sentinel.automateClusterRecovery (le (int .Values.sentinel.downAfterMilliseconds) 2000) }} + retry_count=1 + while validate_quorum + do + info "sleeping, waiting for Redis master to come up" + sleep 1s + if ! ((retry_count % 11)); then + info "Trying to manually failover" + failover_result=$(trigger_manual_failover) + + debug "Failover result: $failover_result" + fi + + ((retry_count+=1)) + done + info "Redis master is up now" + {{- end }} + fi + fi + + if [[ -n "$REDIS_EXTERNAL_MASTER_HOST" ]]; then + REDIS_MASTER_HOST="$REDIS_EXTERNAL_MASTER_HOST" + REDIS_MASTER_PORT_NUMBER="${REDIS_EXTERNAL_MASTER_PORT}" + fi + + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + + echo "" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" = "slave" ]]; then + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + fi + + {{- if .Values.auth.enabled }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + {{- if .Values.replica.extraFlags }} + {{- range .Values.replica.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + + {{- if .Values.replica.preExecCmds }} + {{- .Values.replica.preExecCmds | nindent 4 }} + {{- end }} + + {{- if .Values.replica.command }} + exec {{ .Values.replica.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + + start-sentinel.sh: | + #!/bin/bash + + . /opt/bitnami/scripts/libos.sh + . /opt/bitnami/scripts/libvalidations.sh + . /opt/bitnami/scripts/libfile.sh + + HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo {{ .Values.sentinel.containerPorts.sentinel }} + ;; + "REDIS") + echo {{ .Values.master.containerPorts.redis }} + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + + {{- if .Values.useExternalDNS.enabled }} + echo "${hostname}.{{- include "redis.externalDNS.suffix" . }}" + {{- else if eq .Values.sentinel.service.type "NodePort" }} + echo "${hostname}.{{- .Release.Namespace }}" + {{- else }} + echo "${hostname}.${HEADLESS_SERVICE}" + {{- end }} + } + + SERVPORT=$(get_port "$HOSTNAME" "SENTINEL") + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + SENTINEL_SERVICE_PORT=$(get_port "{{ include "common.names.fullname" . 
}}" "TCP_SENTINEL") + + sentinel_conf_set() { + local -r key="${1:?missing key}" + local value="${2:-}" + + # Sanitize inputs + value="${value//\\/\\\\}" + value="${value//&/\\&}" + value="${value//\?/\\?}" + [[ "$value" = "" ]] && value="\"$value\"" + + replace_in_file "/opt/bitnami/redis-sentinel/etc/sentinel.conf" "^#*\s*${key} .*" "${key} ${value}" false + } + sentinel_conf_add() { + echo $'\n'"$@" >> "/opt/bitnami/redis-sentinel/etc/sentinel.conf" + } + host_id() { + echo "$1" | openssl sha1 | awk '{print $2}' + } + get_sentinel_master_info() { + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + info "about to run the command: $sentinel_info_command" + eval $sentinel_info_command + } + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + + master_in_persisted_conf="$(get_full_hostname "$HOSTNAME")" + + {{- if .Values.sentinel.persistence.enabled }} + if [[ -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then + check_lock_file() { + [[ -f /opt/bitnami/redis-sentinel/etc/.node_read ]] + } + retry_while "check_lock_file" + rm -f /opt/bitnami/redis-sentinel/etc/.node_read + master_in_persisted_conf="$(awk '/monitor/ {print $4}' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + info "Found previous master $master_in_persisted_conf in /opt/bitnami/redis-sentinel/etc/sentinel.conf" + debug "$(cat /opt/bitnami/redis-sentinel/etc/sentinel.conf | grep monitor)" + fi + {{- end }} + if ! 
+        # No master found, let's create a master node
+        export REDIS_REPLICATION_MODE="master"
+
+        REDIS_MASTER_HOST=$(get_full_hostname "$HOSTNAME")
+        REDIS_MASTER_PORT_NUMBER="$REDISPORT"
+    else
+        export REDIS_REPLICATION_MODE="slave"
+
+        # Fetches current master's host and port
+        REDIS_SENTINEL_INFO=($(get_sentinel_master_info))
+        info "printing REDIS_SENTINEL_INFO=(${REDIS_SENTINEL_INFO[0]},${REDIS_SENTINEL_INFO[1]})"
+        REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]}
+        REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]}
+    fi
+
+    if [[ -n "$REDIS_EXTERNAL_MASTER_HOST" ]]; then
+        REDIS_MASTER_HOST="$REDIS_EXTERNAL_MASTER_HOST"
+        REDIS_MASTER_PORT_NUMBER="${REDIS_EXTERNAL_MASTER_PORT}"
+    fi
+
+    cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf
+    {{- if .Values.auth.enabled }}
+    printf "\nsentinel auth-pass %s %s" "{{ .Values.sentinel.masterSet }}" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+    {{- if and .Values.auth.enabled .Values.auth.sentinel }}
+    printf "\nrequirepass %s" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+    {{- end }}
+    {{- end }}
+    printf "\nsentinel myid %s" "$(host_id "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+
+    sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} "$REDIS_MASTER_HOST" "$REDIS_MASTER_PORT_NUMBER" {{ .Values.sentinel.quorum }}"
+
+    add_known_sentinel() {
+        hostname="$1"
+        ip="$2"
+
+        if [[ -n "$hostname" && -n "$ip" && "$hostname" != "$HOSTNAME" ]]; then
+            sentinel_conf_add "sentinel known-sentinel {{ .Values.sentinel.masterSet }} $(get_full_hostname "$hostname") $(get_port "$hostname" "SENTINEL") $(host_id "$hostname")"
+        fi
+    }
+    add_known_replica() {
+        hostname="$1"
+        ip="$2"
+
+        if [[ -n "$ip" && "$(get_full_hostname "$hostname")" != "$REDIS_MASTER_HOST" ]]; then
+            sentinel_conf_add "sentinel known-replica {{ .Values.sentinel.masterSet }} $(get_full_hostname "$hostname") $(get_port "$hostname" "REDIS")"
+        fi
+    }
+
+    # Add available hosts on the network as known replicas & sentinels
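+    # Seeding known-sentinel/known-replica entries up front lets a freshly (re)started
+    # Sentinel find its peers immediately instead of relying only on runtime discovery;
+    # peers are recorded under their stable per-pod DNS names, so the entries stay
+    # valid across pod restarts and IP changes.
+    for node in $(seq 0 $(({{ .Values.replica.replicaCount }}-1))); do
+        hostname="{{ template "common.names.fullname" . }}-node-$node"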
+        ip="$(getent hosts "$hostname.$HEADLESS_SERVICE" | awk '{ print $1 }')"
+        add_known_sentinel "$hostname" "$ip"
+        add_known_replica "$hostname" "$ip"
+    done
+
+    echo "" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+    echo "sentinel announce-hostnames yes" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+    echo "sentinel resolve-hostnames yes" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+    echo "sentinel announce-port $SERVPORT" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+    echo "sentinel announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+
+    {{- if .Values.tls.enabled }}
+    ARGS=("--port" "0")
+    ARGS+=("--tls-port" "${REDIS_SENTINEL_TLS_PORT_NUMBER}")
+    ARGS+=("--tls-cert-file" "${REDIS_SENTINEL_TLS_CERT_FILE}")
+    ARGS+=("--tls-key-file" "${REDIS_SENTINEL_TLS_KEY_FILE}")
+    ARGS+=("--tls-ca-cert-file" "${REDIS_SENTINEL_TLS_CA_FILE}")
+    ARGS+=("--tls-replication" "yes")
+    ARGS+=("--tls-auth-clients" "${REDIS_SENTINEL_TLS_AUTH_CLIENTS}")
+    {{- if .Values.tls.dhParamsFilename }}
+    ARGS+=("--tls-dh-params-file" "${REDIS_SENTINEL_TLS_DH_PARAMS_FILE}")
+    {{- end }}
+    {{- end }}
+    {{- if .Values.sentinel.preExecCmds }}
+    {{ .Values.sentinel.preExecCmds | nindent 4 }}
+    {{- end }}
+    exec redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel {{- if .Values.tls.enabled }} "${ARGS[@]}" {{- end }}
+  prestop-sentinel.sh: |
+    #!/bin/bash
+
+    . /opt/bitnami/scripts/libvalidations.sh
+    . /opt/bitnami/scripts/libos.sh
+
+    HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+    SENTINEL_SERVICE_ENV_NAME={{ printf "%s%s" (upper (include "common.names.fullname" .) | replace "-" "_") "_SERVICE_PORT_TCP_SENTINEL" }}
+    SENTINEL_SERVICE_PORT=${!SENTINEL_SERVICE_ENV_NAME}
+
+    get_full_hostname() {
+        hostname="$1"
+
+        {{- if .Values.useExternalDNS.enabled }}
+        echo "${hostname}.{{- include "redis.externalDNS.suffix" . }}"
+        {{- else if eq .Values.sentinel.service.type "NodePort" }}
+        echo "${hostname}.{{- .Release.Namespace }}"
+        {{- else }}
+        echo "${hostname}.${HEADLESS_SERVICE}"
+        {{- end }}
+    }
+    run_sentinel_command() {
+        if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then
+            redis-cli -h "$REDIS_SERVICE" -p "$SENTINEL_SERVICE_PORT" --tls --cert "$REDIS_SENTINEL_TLS_CERT_FILE" --key "$REDIS_SENTINEL_TLS_KEY_FILE" --cacert "$REDIS_SENTINEL_TLS_CA_FILE" sentinel "$@"
+        else
+            redis-cli -h "$REDIS_SERVICE" -p "$SENTINEL_SERVICE_PORT" sentinel "$@"
+        fi
+    }
+    failover_finished() {
+        REDIS_SENTINEL_INFO=($(run_sentinel_command get-master-addr-by-name "{{ .Values.sentinel.masterSet }}"))
+        REDIS_MASTER_HOST="${REDIS_SENTINEL_INFO[0]}"
+        [[ "$REDIS_MASTER_HOST" != "$(get_full_hostname "$HOSTNAME")" ]]
+    }
+
+    REDIS_SERVICE="{{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+
+    # redis-cli automatically consumes credentials from the REDISCLI_AUTH variable
+    [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+    [[ -f "$REDIS_PASSWORD_FILE" ]] && export REDISCLI_AUTH="$(< "${REDIS_PASSWORD_FILE}")"
+
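+    # Pre-stop handoff: if this pod still holds the master role, ask Sentinel for a
+    # failover and wait for it to finish (bounded below by
+    # terminationGracePeriodSeconds - 10), so clients are redirected before the pod goes away.
+    if ! failover_finished; then
+        echo "I am the master pod and you are stopping me. Starting sentinel failover"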
+        # if I am the master, issue a command to failover once and then wait for the failover to finish
+        run_sentinel_command failover "{{ .Values.sentinel.masterSet }}"
+        if retry_while "failover_finished" "{{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}" 1; then
+            echo "Master has been successfully failed over to a different pod."
+            exit 0
+        else
+            echo "Master failover failed"
+            exit 1
+        fi
+    else
+        exit 0
+    fi
+  prestop-redis.sh: |
+    #!/bin/bash
+
+    . /opt/bitnami/scripts/libvalidations.sh
+    . /opt/bitnami/scripts/libos.sh
+
+    run_redis_command() {
+        if is_boolean_yes "$REDIS_TLS_ENABLED"; then
+            redis-cli -h 127.0.0.1 -p "$REDIS_TLS_PORT" --tls --cert "$REDIS_TLS_CERT_FILE" --key "$REDIS_TLS_KEY_FILE" --cacert "$REDIS_TLS_CA_FILE" "$@"
+        else
+            redis-cli -h 127.0.0.1 -p ${REDIS_PORT} "$@"
+        fi
+    }
+    failover_finished() {
+        REDIS_ROLE=$(run_redis_command role | head -1)
+        [[ "$REDIS_ROLE" != "master" ]]
+    }
+
+    # redis-cli automatically consumes credentials from the REDISCLI_AUTH variable
+    [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+    [[ -f "$REDIS_PASSWORD_FILE" ]] && export REDISCLI_AUTH="$(< "${REDIS_PASSWORD_FILE}")"
+
+    if ! failover_finished; then
+        echo "Waiting for sentinel to run failover for up to {{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}s"
+        retry_while "failover_finished" "{{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}" 1
+    else
+        exit 0
+    fi
+
+{{- else }}
+  start-master.sh: |
+    #!/bin/bash
+
+    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+    {{- if and .Values.master.containerSecurityContext.runAsUser (eq (.Values.master.containerSecurityContext.runAsUser | int) 0) }}
+    useradd redis
+    chown -R redis {{ .Values.master.persistence.path }}
+    {{- end }}
+    if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
+        cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
+    fi
+    if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+        cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+    fi
+    {{- if .Values.tls.enabled }}
+    ARGS=("--port" "0")
+    ARGS+=("--tls-port" "${REDIS_TLS_PORT}")
+    ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}")
+    ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}")
+    ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}")
+    ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}")
+    {{- if .Values.tls.dhParamsFilename }}
+    ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}")
+    {{- end }}
+    {{- else }}
+    ARGS=("--port" "${REDIS_PORT}")
+    {{- end }}
+    {{- if .Values.auth.enabled }}
+    ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+    ARGS+=("--masterauth" "${REDIS_PASSWORD}")
+    {{- else }}
+    ARGS+=("--protected-mode" "no")
+    {{- end }}
+    ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+    ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
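+    {{- /* Each master.extraFlags entry is appended verbatim as one extra redis-server
+    argument; for example a value such as "--maxmemory-policy allkeys-lru" (illustrative
+    only, not a chart default) would be passed straight through to the server. */}}
+    {{- if .Values.master.extraFlags }}
+    {{- range .Values.master.extraFlags }}
+    ARGS+=({{ . 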
| quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.preExecCmds }} + {{ .Values.master.preExecCmds | nindent 4 }} + {{- end }} + {{- if .Values.master.command }} + exec {{ .Values.master.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- if eq .Values.architecture "replication" }} + start-replica.sh: | + #!/bin/bash + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo {{ .Values.sentinel.containerPorts.sentinel }} + ;; + "REDIS") + echo {{ .Values.master.containerPorts.redis }} + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + + {{- if .Values.useExternalDNS.enabled }} + echo "${hostname}.{{- include "redis.externalDNS.suffix" . }}" + {{- else if eq .Values.sentinel.service.type "NodePort" }} + echo "${hostname}.{{- .Release.Namespace }}" + {{- else }} + echo "${hostname}.${HEADLESS_SERVICE}" + {{- end }} + } + + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + {{- if and .Values.replica.containerSecurityContext.runAsUser (eq (.Values.replica.containerSecurityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.replica.persistence.path }} + {{- end }} + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + + echo "" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.auth.enabled }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.replica.extraFlags }} + {{- range .Values.replica.extraFlags }} + ARGS+=({{ . 
| quote }}) + {{- end }} + {{- end }} + {{- if .Values.replica.preExecCmds }} + {{ .Values.replica.preExecCmds | nindent 4 }} + {{- end }} + {{- if .Values.replica.command }} + exec {{ .Values.replica.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/secret.yaml b/ansible/roles/helm_install/files/redis/templates/secret.yaml new file mode 100644 index 0000000..aa2b3a2 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/secret.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + redis-password: {{ include "redis.password" . | b64enc | quote }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/redis/templates/sentinel/hpa.yaml b/ansible/roles/helm_install/files/redis/templates/sentinel/hpa.yaml new file mode 100644 index 0000000..51f0f80 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/sentinel/hpa.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.replica.autoscaling.enabled .Values.sentinel.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ printf "%s-node" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: StatefulSet + name: {{ printf "%s-node" (include "common.names.fullname" .) 
}} + minReplicas: {{ .Values.replica.autoscaling.minReplicas }} + maxReplicas: {{ .Values.replica.autoscaling.maxReplicas }} + metrics: + {{- if .Values.replica.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.replica.autoscaling.targetCPU }} + {{- end }} + {{- if .Values.replica.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.replica.autoscaling.targetMemory }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/sentinel/node-services.yaml b/ansible/roles/helm_install/files/redis/templates/sentinel/node-services.yaml new file mode 100644 index 0000000..8bf7a2b --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/sentinel/node-services.yaml @@ -0,0 +1,71 @@ +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (or .Release.IsUpgrade .Values.sentinel.service.nodePorts.redis ) }} + +{{- range $i := until (int .Values.replica.replicaCount) }} + +{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" $ ) "ports-configmap")).data }} + +{{ $sentinelport := 0}} +{{ $redisport := 0}} +{{- if $portsmap }} +{{ $sentinelport = index $portsmap (printf "%s-node-%s-%s" (include "common.names.fullname" $) (toString $i) "sentinel") }} +{{ $redisport = index $portsmap (printf "%s-node-%s-%s" (include "common.names.fullname" $) (toString $i) "redis") }} +{{- else }} +{{- end }} + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" $ }}-node-{{ $i }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: node + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or $.Values.sentinel.service.annotations $.Values.commonAnnotations }} + annotations: + {{- if $.Values.sentinel.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.sentinel.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: NodePort + ports: + - name: sentinel + {{- if $.Values.sentinel.service.nodePorts.sentinel }} + nodePort: {{ (add $.Values.sentinel.service.nodePorts.sentinel $i 1) }} + port: {{ (add $.Values.sentinel.service.nodePorts.sentinel $i 1) }} + {{- else }} + nodePort: {{ $sentinelport }} + port: {{ $sentinelport }} + {{- end }} + protocol: TCP + targetPort: {{ $.Values.sentinel.containerPorts.sentinel }} + - name: redis + {{- if $.Values.sentinel.service.nodePorts.redis }} + nodePort: {{ (add $.Values.sentinel.service.nodePorts.redis $i 1) }} + port: {{ (add $.Values.sentinel.service.nodePorts.redis $i 1) }} + {{- else }} + nodePort: {{ $redisport }} + port: {{ $redisport }} + {{- end }} + protocol: TCP + targetPort: {{ $.Values.replica.containerPorts.redis }} + - name: sentinel-internal + nodePort: null + port: {{ $.Values.sentinel.containerPorts.sentinel }} + protocol: TCP + targetPort: {{ $.Values.sentinel.containerPorts.sentinel }} + - name: redis-internal + nodePort: null + port: {{ $.Values.replica.containerPorts.redis }} + protocol: TCP + targetPort: {{ 
$.Values.replica.containerPorts.redis }}
+  selector:
+    statefulset.kubernetes.io/pod-name: {{ template "common.names.fullname" $ }}-node-{{ $i }}
+{{- end }}
+{{- end }}
diff --git a/ansible/roles/helm_install/files/redis/templates/sentinel/ports-configmap.yaml b/ansible/roles/helm_install/files/redis/templates/sentinel/ports-configmap.yaml
new file mode 100644
index 0000000..f5e7b2a
--- /dev/null
+++ b/ansible/roles/helm_install/files/redis/templates/sentinel/ports-configmap.yaml
@@ -0,0 +1,100 @@
+{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (not .Values.sentinel.service.nodePorts.redis ) }}
+{{- /* create a list to keep track of ports we choose to use */}}
+{{ $chosenports := (list ) }}
+
+{{- /* Get list of all used nodeports */}}
+{{ $usedports := (list ) }}
+{{- range $index, $service := (lookup "v1" "Service" "" "").items }}
+  {{- range .spec.ports }}
+    {{- if .nodePort }}
+      {{- $usedports = (append $usedports .nodePort) }}
+    {{- end }}
+  {{- end }}
+{{- end }}
+
+{{- /*
+Comments that start with # are rendered into the output, so when you debug you can
+open the rendered manifest in less and search for them. Variables referenced in such a
+comment are rendered too, so you can inspect their values this way.
+https://helm.sh/docs/chart_best_practices/templates/#comments-yaml-comments-vs-template-comments
+
+Template comments are stripped at render time; the YAML # comments are left in to help debugging.
+*/}}
+
+{{- /* Sort the list */}}
+{{ $usedports = $usedports | sortAlpha }}
+#usedports {{ $usedports }}
+
+{{- /* How many NodePorts to allocate per node service; the main service always gets two */}}
+{{ $numberofPortsPerNodeService := 2 }}
+
+{{- /* For every NodePort we need, loop through the used ports to find an unused one */}}
+{{- range $j := until (int (add (mul (int .Values.replica.replicaCount) $numberofPortsPerNodeService) 2)) }}
+  {{- /* #j={{ $j }} */}}
+  {{- $nodeport := (add $j 30000) }}
+  {{- $nodeportfound := false }}
+  {{- range $i := $usedports }}
+    {{- /* #i={{ $i }}
+    #nodeport={{ $nodeport }}
+    #usedports={{ $usedports }} */}}
+    {{- if and (has (toString $nodeport) $usedports) (eq $nodeportfound false) }}
+    {{- /* nodeport conflicts with one already in use */}}
+      {{- $nodeport = (add $nodeport 1) }}
+    {{- else if and ( has $nodeport $chosenports) (eq $nodeportfound false) }}
+    {{- /* nodeport already chosen, try another */}}
+      {{- $nodeport = (add $nodeport 1) }}
+    {{- else if (eq $nodeportfound false) }}
+    {{- /* nodeport free to use: not already claimed and not in use */}}
+    {{- /* select this nodeport and record it in chosenports */}}
+      {{- $chosenports = (append $chosenports $nodeport) }}
+      {{- $nodeportfound = true }}
+    {{- else }}
+    {{- /* this nodeport has already been chosen and locked in; keep working through the rest of the list before moving to the next selection */}}
+    {{- end }}
+  {{- end }}
+  {{- if (eq $nodeportfound false) }}
+    {{- $chosenports = (append $chosenports $nodeport) }}
+  {{- end }}
+
+{{- end }}
+
+{{- /* print the usedports and chosenports for debugging */}}
+#usedports {{ $usedports }}
+#chosenports {{ $chosenports }}
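+
+{{- /* The ports chosen above are persisted in the ConfigMap below: on upgrade, the
+lookup in the data section returns the map written at install time, so previously
+assigned NodePorts are reused instead of being re-picked. */}}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "common.names.fullname" . }}-ports-configmap
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . 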
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" . ) "ports-configmap")).data }} +{{- if $portsmap }} +{{- /* configmap already exists, do not install again */ -}} + {{- range $name, $value := $portsmap }} + "{{ $name }}": "{{ $value }}" + {{- end }} +{{- else }} +{{- /* configmap being set for first time */ -}} + {{- range $index, $port := $chosenports }} + {{- $nodenumber := (floor (div $index 2)) }} + {{- if (eq $index 0) }} + "{{ template "common.names.fullname" $ }}-sentinel": "{{ $port }}" + {{- else if (eq $index 1) }} + "{{ template "common.names.fullname" $ }}-redis": "{{ $port }}" + {{- else if (eq (mod $index 2) 0) }} + "{{ template "common.names.fullname" $ }}-node-{{ (sub $nodenumber 1) }}-sentinel": "{{ $port }}" + {{- else if (eq (mod $index 2) 1) }} + "{{ template "common.names.fullname" $ }}-node-{{ (sub $nodenumber 1) }}-redis": "{{ $port }}" + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/sentinel/service.yaml b/ansible/roles/helm_install/files/redis/templates/sentinel/service.yaml new file mode 100644 index 0000000..622ed95 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/sentinel/service.yaml @@ -0,0 +1,96 @@ +{{- if or .Release.IsUpgrade (ne .Values.sentinel.service.type "NodePort") .Values.sentinel.service.nodePorts.redis -}} + +--- +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} +{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" . ) "ports-configmap")).data }} + +{{ $sentinelport := 0}} +{{ $redisport := 0}} +{{- if $portsmap }} +{{ $sentinelport = index $portsmap (printf "%s-%s" (include "common.names.fullname" $) "sentinel") }} +{{ $redisport = index $portsmap (printf "%s-%s" (include "common.names.fullname" $) "redis") }} +{{- else }} +{{- end }} + +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }}
+    app.kubernetes.io/component: node
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if or .Values.sentinel.service.annotations .Values.commonAnnotations }}
+  annotations:
+    {{- if .Values.sentinel.service.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.service.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: {{ .Values.sentinel.service.type }}
+  {{- if eq .Values.sentinel.service.type "LoadBalancer" }}
+  externalTrafficPolicy: {{ .Values.sentinel.service.externalTrafficPolicy }}
+  {{- end }}
+  {{- if and (eq .Values.sentinel.service.type "LoadBalancer") .Values.sentinel.service.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }}
+  {{- end }}
+  {{- if and (eq .Values.sentinel.service.type "LoadBalancer") .Values.sentinel.service.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges: {{- toYaml .Values.sentinel.service.loadBalancerSourceRanges | nindent 4 }}
+  {{- end }}
+  {{- if and (eq .Values.sentinel.service.type "ClusterIP") .Values.sentinel.service.clusterIP }}
+  clusterIP: {{ .Values.sentinel.service.clusterIP }}
+  {{- end }}
+  ports:
+    - name: tcp-redis
+      {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.redis }}
+      port: {{ .Values.sentinel.service.nodePorts.redis }}
+      {{- else if eq .Values.sentinel.service.type "NodePort" }}
+      port: {{ $redisport }}
+      {{- else }}
+      port: {{ .Values.sentinel.service.ports.redis }}
+      {{- end }}
+      targetPort: {{ .Values.replica.containerPorts.redis }}
+      {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.redis }}
+      nodePort: {{ .Values.sentinel.service.nodePorts.redis }}
+      {{- else if eq .Values.sentinel.service.type "ClusterIP" }}
+      nodePort: null
+      {{- else if eq .Values.sentinel.service.type "NodePort" }}
+      nodePort: {{ $redisport }}
+      {{- end }}
+    - name: tcp-sentinel
+      {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.sentinel }}
+      port: {{ .Values.sentinel.service.nodePorts.sentinel }}
+      {{- else if eq .Values.sentinel.service.type "NodePort" }}
+      port: {{ $sentinelport }}
+      {{- else }}
+      port: {{ .Values.sentinel.service.ports.sentinel }}
+      {{- end }}
+      targetPort: {{ .Values.sentinel.containerPorts.sentinel }}
+      {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.sentinel }}
+      nodePort: {{ .Values.sentinel.service.nodePorts.sentinel }}
+      {{- else if eq .Values.sentinel.service.type "ClusterIP" }}
+      nodePort: null
+      {{- else if eq .Values.sentinel.service.type "NodePort" }}
+      nodePort: {{ $sentinelport }}
+      {{- end }}
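+    {{- if eq .Values.sentinel.service.type "NodePort" }}
+    {{- /* With NodePort exposure the entries above reuse the allocated node port numbers
+    as their service ports, so these *-internal entries keep the standard container
+    ports reachable through the Service for in-cluster clients. */}}
+    - name: sentinel-internal
+      nodePort: null
+      port: {{ .Values.sentinel.containerPorts.sentinel }}
+      protocol: TCP
+      targetPort: {{ .Values.sentinel.containerPorts.sentinel }}
+    - name: redis-internal
+      nodePort: null
+      port: {{ .Values.replica.containerPorts.redis }}
+      protocol: TCP
+      targetPort: {{ 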
.Values.replica.containerPorts.redis }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: node +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/sentinel/statefulset.yaml b/ansible/roles/helm_install/files/redis/templates/sentinel/statefulset.yaml new file mode 100644 index 0000000..0be57ac --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/sentinel/statefulset.yaml @@ -0,0 +1,670 @@ +{{- if or .Release.IsUpgrade (ne .Values.sentinel.service.type "NodePort") .Values.sentinel.service.nodePorts.redis -}} +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ printf "%s-node" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replica.replicaCount }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: node + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }} + {{- if .Values.replica.updateStrategy }} + updateStrategy: {{- toYaml .Values.replica.updateStrategy | nindent 4 }} + {{- end }} + {{- if .Values.replica.podManagementPolicy }} + podManagementPolicy: {{ .Values.replica.podManagementPolicy | quote }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: node + {{- if .Values.replica.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "redis.createConfigmap" .) }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.replica.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.replica.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.replica.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.podSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.replica.priorityClassName }} + priorityClassName: {{ .Values.replica.priorityClassName | quote }} + {{- end }} + {{- if .Values.replica.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.replica.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAffinityPreset "component" "node" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAntiAffinityPreset "component" "node" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.replica.nodeAffinityPreset.type "key" .Values.replica.nodeAffinityPreset.key "values" .Values.replica.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.replica.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.replica.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.replica.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.replica.shareProcessNamespace }} + {{- end }} + {{- if .Values.replica.schedulerName }} + schedulerName: {{ .Values.replica.schedulerName | quote }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.sentinel.terminationGracePeriodSeconds }} + containers: + - name: redis + image: {{ template "redis.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.replica.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.replica.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.replica.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.replica.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.replica.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-node.sh + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.replica.containerPorts.redis | quote }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + {{- if .Values.auth.enabled }} + {{- if .Values.auth.usePasswordFiles }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.replica.containerPorts.redis | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}}
+            {{- end }}
+            {{- else }}
+            - name: REDIS_PORT
+              value: {{ .Values.replica.containerPorts.redis | quote }}
+            {{- end }}
+            - name: REDIS_DATA_DIR
+              value: {{ .Values.replica.persistence.path }}
+            {{- if .Values.replica.externalMaster.enabled }}
+            - name: REDIS_EXTERNAL_MASTER_HOST
+              value: {{ .Values.replica.externalMaster.host | quote }}
+            - name: REDIS_EXTERNAL_MASTER_PORT
+              value: {{ .Values.replica.externalMaster.port | quote }}
+            {{- end }}
+            {{- if .Values.replica.extraEnvVars }}
+            {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraEnvVars "context" $ ) | nindent 12 }}
+            {{- end }}
+          {{- if or .Values.replica.extraEnvVarsCM .Values.replica.extraEnvVarsSecret }}
+          envFrom:
+            {{- if .Values.replica.extraEnvVarsCM }}
+            - configMapRef:
+                name: {{ .Values.replica.extraEnvVarsCM }}
+            {{- end }}
+            {{- if .Values.replica.extraEnvVarsSecret }}
+            - secretRef:
+                name: {{ .Values.replica.extraEnvVarsSecret }}
+            {{- end }}
+          {{- end }}
+          ports:
+            - name: redis
+              containerPort: {{ .Values.replica.containerPorts.redis }}
+          {{- if not .Values.diagnosticMode.enabled }}
+          {{- if .Values.replica.startupProbe.enabled }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.replica.startupProbe "enabled") "context" $) | nindent 12 }}
+            tcpSocket:
+              port: redis
+          {{- else if .Values.replica.customStartupProbe }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customStartupProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.replica.livenessProbe.enabled }}
+          livenessProbe:
+            initialDelaySeconds: {{ .Values.replica.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.replica.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.replica.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.replica.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.replica.livenessProbe.failureThreshold }}
+            exec:
+              command:
+                - sh
+                - -c
+                - /health/ping_liveness_local.sh {{ .Values.replica.livenessProbe.timeoutSeconds }}
+          {{- else if .Values.replica.customLivenessProbe }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customLivenessProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.replica.readinessProbe.enabled }}
+          readinessProbe:
+            initialDelaySeconds: {{ .Values.replica.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.replica.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.replica.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.replica.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.replica.readinessProbe.failureThreshold }}
+            exec:
+              command:
+                - sh
+                - -c
+                - /health/ping_readiness_local.sh {{ .Values.replica.readinessProbe.timeoutSeconds }}
+          {{- else if .Values.replica.customReadinessProbe }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customReadinessProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- end }}
+          {{- if .Values.replica.resources }}
+          resources: {{- toYaml .Values.replica.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: start-scripts
+              mountPath: /opt/bitnami/scripts/start-scripts
+            - name: health
+              mountPath: /health
+            {{- if .Values.sentinel.persistence.enabled }}
+            - name: sentinel-data
+              mountPath: /opt/bitnami/redis-sentinel/etc
+            {{- end }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: redis-password
+              mountPath: /opt/bitnami/redis/secrets/
+            {{- end }}
+            - name: redis-data
+              mountPath: {{ .Values.replica.persistence.path }}
+              subPath: {{ .Values.replica.persistence.subPath }}
+            - name: config
+              mountPath: /opt/bitnami/redis/mounted-etc
+            - name: redis-tmp-conf
+              mountPath: /opt/bitnami/redis/etc
+            - name: tmp
+              mountPath: /tmp
+            {{- if .Values.tls.enabled }}
+            - name: redis-certificates
+              mountPath: /opt/bitnami/redis/certs
+              readOnly: true
+            {{- end }}
+            {{- if .Values.replica.extraVolumeMounts }}
+            {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumeMounts "context" $ ) | nindent 12 }}
+            {{- end }}
+          lifecycle:
+            preStop:
+              exec:
+                command:
+                  - /bin/bash
+                  - -c
+                  - /opt/bitnami/scripts/start-scripts/prestop-redis.sh
+        - name: sentinel
+          image: {{ template "redis.sentinel.image" . }}
+          imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }}
+          {{- if .Values.sentinel.lifecycleHooks }}
+          lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.lifecycleHooks "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.sentinel.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.sentinel.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+          {{- else if .Values.sentinel.command }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.command "context" $) | nindent 12 }}
+          {{- else }}
+          command:
+            - /bin/bash
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- else if .Values.sentinel.args }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.args "context" $) | nindent 12 }}
+          {{- else }}
+          args:
+            - -c
+            - /opt/bitnami/scripts/start-scripts/start-sentinel.sh
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" (or .Values.sentinel.image.debug .Values.diagnosticMode.enabled) | quote }}
+            {{- if .Values.auth.enabled }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: REDIS_PASSWORD_FILE
+              value: "/opt/bitnami/redis/secrets/redis-password"
+            {{- else }}
+            - name: REDIS_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ template "redis.secretName" . }}
+                  key: {{ template "redis.secretPasswordKey" . }}
+            {{- end }}
+            {{- else }}
+            - name: ALLOW_EMPTY_PASSWORD
+              value: "yes"
+            {{- end }}
+            - name: REDIS_SENTINEL_TLS_ENABLED
+              value: {{ ternary "yes" "no" .Values.tls.enabled | quote }}
+            {{- if .Values.tls.enabled }}
+            - name: REDIS_SENTINEL_TLS_PORT_NUMBER
+              value: {{ .Values.sentinel.containerPorts.sentinel | quote }}
+            - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS
+              value: {{ ternary "yes" "no" .Values.tls.authClients | quote }}
+            - name: REDIS_SENTINEL_TLS_CERT_FILE
+              value: {{ template "redis.tlsCert" . }}
+            - name: REDIS_SENTINEL_TLS_KEY_FILE
+              value: {{ template "redis.tlsCertKey" . }}
+            - name: REDIS_SENTINEL_TLS_CA_FILE
+              value: {{ template "redis.tlsCACert" . }}
+            {{- if .Values.tls.dhParamsFilename }}
+            - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE
+              value: {{ template "redis.tlsDHParams" . }}
+            {{- end }}
+            {{- else }}
+            - name: REDIS_SENTINEL_PORT
+              value: {{ .Values.sentinel.containerPorts.sentinel | quote }}
+            {{- end }}
+            {{- if .Values.sentinel.externalMaster.enabled }}
+            - name: REDIS_EXTERNAL_MASTER_HOST
+              value: {{ .Values.sentinel.externalMaster.host | quote }}
+            - name: REDIS_EXTERNAL_MASTER_PORT
+              value: {{ .Values.sentinel.externalMaster.port | quote }}
+            {{- end }}
+            {{- if .Values.sentinel.extraEnvVars }}
+            {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraEnvVars "context" $ ) | nindent 12 }}
+            {{- end }}
+          {{- if or .Values.sentinel.extraEnvVarsCM .Values.sentinel.extraEnvVarsSecret }}
+          envFrom:
+            {{- if .Values.sentinel.extraEnvVarsCM }}
+            - configMapRef:
+                name: {{ .Values.sentinel.extraEnvVarsCM }}
+            {{- end }}
+            {{- if .Values.sentinel.extraEnvVarsSecret }}
+            - secretRef:
+                name: {{ .Values.sentinel.extraEnvVarsSecret }}
+            {{- end }}
+          {{- end }}
+          ports:
+            - name: redis-sentinel
+              containerPort: {{ .Values.sentinel.containerPorts.sentinel }}
+          {{- if not .Values.diagnosticMode.enabled }}
+          {{- if .Values.sentinel.startupProbe.enabled }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.sentinel.startupProbe "enabled") "context" $) | nindent 12 }}
+            tcpSocket:
+              port: redis-sentinel
+          {{- else if .Values.sentinel.customStartupProbe }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.customStartupProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.sentinel.livenessProbe.enabled }}
+          livenessProbe:
+            initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }}
+            exec:
+              command:
+                - sh
+                - -c
+                - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }}
+          {{- else if .Values.sentinel.customLivenessProbe }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.customLivenessProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- end }}
+          {{- if not .Values.diagnosticMode.enabled }}
+          {{- if .Values.sentinel.readinessProbe.enabled }}
+          readinessProbe:
+            initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }}
+            exec:
+              command:
+                - sh
+                - -c
+                - /health/ping_sentinel.sh {{ .Values.sentinel.readinessProbe.timeoutSeconds }}
+          {{- else if .Values.sentinel.customReadinessProbe }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.customReadinessProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- end }}
+          {{- if not .Values.diagnosticMode.enabled }}
+          lifecycle:
+            preStop:
+              exec:
+                command:
+                  - /bin/bash
+                  - -c
+                  - /opt/bitnami/scripts/start-scripts/prestop-sentinel.sh
+          {{- end }}
+          {{- if .Values.sentinel.resources }}
+          resources: {{- toYaml .Values.sentinel.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: start-scripts
+              mountPath: /opt/bitnami/scripts/start-scripts
+            - name: health
+              mountPath: /health
+            {{- if .Values.sentinel.persistence.enabled }}
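+            {{- /* Persisting /opt/bitnami/redis-sentinel/etc keeps sentinel.conf (this
+            node's myid, known peers and epochs) across restarts; the start script relies
+            on it to recover the previous master from the persisted "monitor" line. */}}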
+ - name: sentinel-data + mountPath: /opt/bitnami/redis-sentinel/etc + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.replica.persistence.path }} + subPath: {{ .Values.replica.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.sentinel.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "common.names.fullname" . }} + {{- if .Values.auth.enabled }} + - name: REDIS_USER + value: default + {{- if (not .Values.auth.usePasswordFiles) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.replica.containerPorts.redis }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + ports: + - name: metrics + containerPort: 9121 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.replica.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.replica.persistence.enabled .Values.replica.podSecurityContext.enabled .Values.replica.containerSecurityContext.enabled }} + {{- if or .Values.replica.initContainers $needsVolumePermissions .Values.sysctl.enabled }} + initContainers: + {{- if .Values.replica.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ include "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.replica.persistence.path }} + {{- else }} + chown -R {{ .Values.replica.containerSecurityContext.runAsUser }}:{{ .Values.replica.podSecurityContext.fsGroup }} {{ .Values.replica.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.replica.persistence.path }} + subPath: {{ .Values.replica.persistence.subPath }} + {{- end }} + {{- if .Values.sysctl.enabled }} + - name: init-sysctl + image: {{ include "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctl.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.resources }} + resources: {{- toYaml .Values.sysctl.resources | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + - name: health + configMap: + name: {{ printf "%s-health" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ include "redis.configmapName" . 
}} + {{- if .Values.sysctl.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + {{- if not .Values.sentinel.persistence.enabled }} + - name: sentinel-data + {{- if .Values.sentinel.persistence.medium }} + emptyDir: { + medium: {{ .Values.sentinel.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + - name: redis-tmp-conf + {{- if .Values.replica.persistence.medium }} + emptyDir: { + medium: {{ .Values.replica.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + - name: tmp + {{- if .Values.replica.persistence.medium }} + emptyDir: { + medium: {{ .Values.replica.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.replica.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.sentinel.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ include "redis.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if not .Values.replica.persistence.enabled }} + - name: redis-data + {{- if .Values.replica.persistence.medium }} + emptyDir: { + medium: {{ .Values.replica.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: {{- include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: node + {{- if .Values.replica.persistence.annotations }} + annotations: {{- toYaml .Values.replica.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.replica.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.replica.persistence.size | quote }} + {{- if .Values.replica.persistence.selector }} + selector: {{- include "common.tplvalues.render" ( dict "value" .Values.replica.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.replica.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.sentinel.persistence.enabled }} + - metadata: + name: sentinel-data + labels: {{- include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: node + {{- if .Values.sentinel.persistence.annotations }} + annotations: {{- toYaml .Values.sentinel.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.sentinel.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.sentinel.persistence.size | quote }} + {{- if .Values.sentinel.persistence.selector }} + selector: {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.sentinel.persistence "global" .Values.global) | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/serviceaccount.yaml b/ansible/roles/helm_install/files/redis/templates/serviceaccount.yaml new file mode 100644 index 0000000..1ce68a7 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/serviceaccount.yaml @@ -0,0 +1,21 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +metadata: + name: {{ template "redis.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.serviceAccount.annotations }} + annotations: + {{- if or .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/servicemonitor.yaml b/ansible/roles/helm_install/files/redis/templates/servicemonitor.yaml new file mode 100644 index 0000000..0d94a4b --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/servicemonitor.yaml @@ -0,0 +1,45 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "common.names.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/templates/tls-secret.yaml b/ansible/roles/helm_install/files/redis/templates/tls-secret.yaml new file mode 100644 index 0000000..5b40dff --- /dev/null +++ b/ansible/roles/helm_install/files/redis/templates/tls-secret.yaml @@ -0,0 +1,26 @@ +{{- if (include "redis.createTlsSecret" .) }} +{{- $ca := genCA "redis-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) "127.0.0.1" "localhost" $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-crt + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} diff --git a/ansible/roles/helm_install/files/redis/values.schema.json b/ansible/roles/helm_install/files/redis/values.schema.json new file mode 100644 index 0000000..d6e226b --- /dev/null +++ b/ansible/roles/helm_install/files/redis/values.schema.json @@ -0,0 +1,156 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "architecture": { + "type": "string", + "title": "Redis architecture", + "form": true, + "description": "Allowed values: `standalone` or `replication`", + "enum": ["standalone", "replication"] + }, + "auth": { + "type": "object", + "title": "Authentication configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Use password authentication" + }, + "password": { + "type": "string", + "title": "Redis password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "auth/enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "kind": { + "type": "string", + "title": "Workload Kind", + "form": true, + "description": "Allowed values: `Deployment` or `StatefulSet`", + "enum": ["Deployment", "StatefulSet"] + }, + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "master/persistence/enabled" + } + } + } + } + } + }, + "replica": { + "type": "object", + "title": "Redis replicas settings", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + }, + "properties": { + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of Redis replicas" + }, + "persistence": { + "type": "object", + "title": "Persistence for Redis replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "replica/persistence/enabled" + } + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus 
metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/ansible/roles/helm_install/files/redis/values.yaml b/ansible/roles/helm_install/files/redis/values.yaml new file mode 100644 index 0000000..d6abd90 --- /dev/null +++ b/ansible/roles/helm_install/files/redis/values.yaml @@ -0,0 +1,1536 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.redis.password Global Redis™ password (overrides `auth.password`) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + redis: + password: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section Redis™ Image parameters +## + +## Bitnami Redis™ image +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## @param image.registry Redis™ image registry +## @param image.repository Redis™ image repository +## @param image.tag Redis™ image tag (immutable tags are recommended) +## @param image.pullPolicy Redis™ image pull policy +## @param image.pullSecrets Redis™ image pull secrets +## @param image.debug Enable image debug mode +## +image: + registry: docker.io + repository: bitnami/redis + tag: 6.2.6-debian-10-r158 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + +## @section Redis™ common configuration parameters +## https://github.com/bitnami/bitnami-docker-redis#configuration +## + +## @param architecture Redis™ architecture. Allowed values: `standalone` or `replication` +## +architecture: replication +## Redis™ Authentication parameters +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +auth: + ## @param auth.enabled Enable password authentication + ## + enabled: true + ## @param auth.sentinel Enable password authentication on sentinels too + ## + sentinel: true + ## @param auth.password Redis™ password + ## Defaults to a random 10-character alphanumeric string if not set + ## + password: "" + ## @param auth.existingSecret The name of an existing secret with Redis™ credentials + ## NOTE: When it's set, the previous `auth.password` parameter is ignored + ## + existingSecret: "" + ## @param auth.existingSecretPasswordKey Password key to be retrieved from existing secret + ## NOTE: ignored unless `auth.existingSecret` parameter is set + ## + existingSecretPasswordKey: "" + ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: false + +## @param commonConfiguration [string] Common configuration to be added into the ConfigMap +## ref: https://redis.io/topics/config +## +commonConfiguration: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for Redis™ nodes +## +existingConfigmap: "" + +## @section Redis™ master configuration parameters +## + +master: + ## @param master.configuration Configuration for Redis™ master nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param master.disableCommands Array with Redis™ commands to disable on master nodes + ## Commands will be completely disabled by renaming each to an empty string. 
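+ ## e.g. (illustrative override, not a chart default): on a trusted, isolated
+ ## network it may be acceptable to keep every command enabled:
+ ## disableCommands: []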
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param master.command Override default container command (useful when using custom images) + ## + command: [] + ## @param master.args Override default container args (useful when using custom images) + ## + args: [] + ## @param master.preExecCmds Additional commands to run prior to starting Redis™ master + ## + preExecCmds: [] + ## @param master.extraFlags Array with additional command line flags for Redis™ master + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param master.extraEnvVars Array with extra environment variables to add to Redis™ master nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis™ master nodes + ## + extraEnvVarsCM: "" + ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis™ master nodes + ## + extraEnvVarsSecret: "" + ## @param master.containerPorts.redis Container port to open on Redis™ master nodes + ## + containerPorts: + redis: 6379 + ## Configure extra options for Redis™ containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param master.startupProbe.enabled Enable startupProbe on Redis™ master nodes + ## @param master.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param master.startupProbe.periodSeconds Period seconds for startupProbe + ## @param master.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param master.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param master.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.livenessProbe.enabled Enable livenessProbe on Redis™ master nodes + ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param master.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.readinessProbe.enabled Enable readinessProbe on Redis™ master nodes + ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param master.customStartupProbe Custom startupProbe that overrides the default one + ## + 
customStartupProbe: {} + ## @param master.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis™ master resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the Redis™ master containers + ## @param master.resources.requests The requested resources for the Redis™ master containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.podSecurityContext.enabled Enabled Redis™ master pods' Security Context + ## @param master.podSecurityContext.fsGroup Set Redis™ master pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.containerSecurityContext.enabled Enabled Redis™ master containers' Security Context + ## @param master.containerSecurityContext.runAsUser Set Redis™ master containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param master.kind Use either Deployment or StatefulSet (default) + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ + ## + kind: StatefulSet + ## @param master.schedulerName Alternate scheduler for Redis™ master pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param master.updateStrategy.type Redis™ master statefulset strategy type + ## @skip master.updateStrategy.rollingUpdate + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + rollingUpdate: {} + ## @param master.priorityClassName Redis™ master pods' priorityClassName + ## + priorityClassName: "" + ## @param master.hostAliases Redis™ master pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param master.podLabels Extra labels for Redis™ master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param master.podAnnotations Annotations for Redis™ master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param master.shareProcessNamespace Share a single process namespace between all of the containers in Redis™ master pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node master.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set + ## + key: "" + ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param master.affinity Affinity for Redis™ master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param master.nodeSelector Node labels for Redis™ master pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param master.tolerations Tolerations for Redis™ master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param master.topologySpreadConstraints Spread Constraints for Redis™ master pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: [] + ## @param master.lifecycleHooks for the Redis™ master container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param master.extraVolumes Optionally specify extra list of additional volumes for the Redis™ master pod(s) + ## + extraVolumes: [] + ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis™ master container(s) + ## + extraVolumeMounts: [] + ## @param master.sidecars Add additional sidecar containers to the Redis™ master pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param master.initContainers Add additional init containers to the Redis™ master pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on Redis™ master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.medium Provide a medium for `emptyDir` volumes. + ## + medium: "" + ## @param master.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. 
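+ ## e.g. (illustrative only; assumes persistence is deliberately disabled so
+ ## the chart falls back to an emptyDir volume, with placeholder sizing):
+ ## enabled: false
+ ## medium: Memory
+ ## sizeLimit: 1Gi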
+ ## + sizeLimit: "" + ## @param master.persistence.path The path the volume will be mounted at on Redis™ master containers + ## NOTE: Useful when using different Redis™ images + ## + path: /data + ## @param master.persistence.subPath The subdirectory of the volume to mount on Redis™ master containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 8Gi + ## @param master.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param master.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param master.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param master.persistence.existingClaim Use an existing PVC which must be created manually before being bound + ## NOTE: requires master.persistence.enabled: true + ## + existingClaim: "" + ## Redis™ master service parameters + ## + service: + ## @param master.service.type Redis™ master service type + ## + type: ClusterIP + ## @param master.service.ports.redis Redis™ master service port + ## + ports: + redis: 6379 + ## @param master.service.nodePorts.redis Node port for Redis™ master + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + redis: "" + ## @param master.service.externalTrafficPolicy Redis™ master service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param master.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param master.service.clusterIP Redis™ master service Cluster IP + ## + clusterIP: "" + ## @param master.service.loadBalancerIP Redis™ master service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param master.service.loadBalancerSourceRanges Redis™ master service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g.
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param master.service.annotations Additional custom annotations for Redis™ master service + ## + annotations: {} + ## @param master.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-master pods + ## + terminationGracePeriodSeconds: 30 + +## @section Redis™ replicas configuration parameters +## + +replica: + ## @param replica.replicaCount Number of Redis™ replicas to deploy + ## + replicaCount: 3 + ## @param replica.configuration Configuration for Redis™ replicas nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param replica.disableCommands Array with Redis™ commands to disable on replicas nodes + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param replica.command Override default container command (useful when using custom images) + ## + command: [] + ## @param replica.args Override default container args (useful when using custom images) + ## + args: [] + ## @param replica.preExecCmds Additional commands to run prior to starting Redis™ replicas + ## + preExecCmds: [] + ## @param replica.extraFlags Array with additional command line flags for Redis™ replicas + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param replica.extraEnvVars Array with extra environment variables to add to Redis™ replicas nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param replica.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis™ replicas nodes + ## + extraEnvVarsCM: "" + ## @param replica.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis™ replicas nodes + ## + extraEnvVarsSecret: "" + ## @param replica.externalMaster.enabled Use external master for bootstrapping + ## @param replica.externalMaster.host External master host to bootstrap from + ## @param replica.externalMaster.port Port for Redis service external master host + ## + externalMaster: + enabled: false + host: "" + port: 6379 + ## @param replica.containerPorts.redis Container port to open on Redis™ replicas nodes + ## + containerPorts: + redis: 6379 + ## Configure extra options for Redis™ containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param replica.startupProbe.enabled Enable startupProbe on Redis™ replicas nodes + ## @param replica.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param replica.startupProbe.periodSeconds Period seconds for startupProbe + ## @param replica.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param replica.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param replica.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 22 + ## @param replica.livenessProbe.enabled Enable livenessProbe on Redis™ replicas nodes + ## @param replica.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param replica.livenessProbe.periodSeconds Period seconds for livenessProbe 
+ ## @param replica.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param replica.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param replica.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.readinessProbe.enabled Enable readinessProbe on Redis™ replicas nodes + ## @param replica.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param replica.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param replica.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param replica.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param replica.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param replica.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param replica.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis™ replicas resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the Redis™ replicas containers + ## @param replica.resources.requests The requested resources for the Redis™ replicas containers + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
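+ ## e.g. (illustrative sizing only, not a recommendation; adjust to the workload):
+ ## limits:
+ ##   cpu: 250m
+ ##   memory: 256Mi
+ ## requests:
+ ##   cpu: 100m
+ ##   memory: 128Mi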
limits: {} + # cpu: 250m + # memory: 256Mi + requests: {} + # cpu: 250m + # memory: 256Mi + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param replica.podSecurityContext.enabled Enabled Redis™ replicas pods' Security Context + ## @param replica.podSecurityContext.fsGroup Set Redis™ replicas pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param replica.containerSecurityContext.enabled Enabled Redis™ replicas containers' Security Context + ## @param replica.containerSecurityContext.runAsUser Set Redis™ replicas containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param replica.schedulerName Alternate scheduler for Redis™ replicas pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param replica.updateStrategy.type Redis™ replicas statefulset strategy type + ## @skip replica.updateStrategy.rollingUpdate + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + rollingUpdate: {} + ## @param replica.priorityClassName Redis™ replicas pods' priorityClassName + ## + priorityClassName: "" + ## @param replica.podManagementPolicy podManagementPolicy to manage scaling operation of Redis™ replica pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "" + ## @param replica.hostAliases Redis™ replicas pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param replica.podLabels Extra labels for Redis™ replicas pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param replica.podAnnotations Annotations for Redis™ replicas pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param replica.shareProcessNamespace Share a single process namespace between all of the containers in Redis™ replicas pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param replica.podAffinityPreset Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param replica.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param replica.nodeAffinityPreset.type Node affinity preset type. Ignored if `replica.affinity` is set.
Allowed values: `soft` or `hard` + ## + type: "" + ## @param replica.nodeAffinityPreset.key Node label key to match. Ignored if `replica.affinity` is set + ## + key: "" + ## @param replica.nodeAffinityPreset.values Node label values to match. Ignored if `replica.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param replica.affinity Affinity for Redis™ replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `replica.podAffinityPreset`, `replica.podAntiAffinityPreset`, and `replica.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param replica.nodeSelector Node labels for Redis™ replicas pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param replica.tolerations Tolerations for Redis™ replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param replica.topologySpreadConstraints Spread Constraints for Redis™ replicas pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: [] + ## @param replica.lifecycleHooks for the Redis™ replica container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param replica.extraVolumes Optionally specify extra list of additional volumes for the Redis™ replicas pod(s) + ## + extraVolumes: [] + ## @param replica.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis™ replicas container(s) + ## + extraVolumeMounts: [] + ## @param replica.sidecars Add additional sidecar containers to the Redis™ replicas pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param replica.initContainers Add additional init containers to the Redis™ replicas pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence Parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param replica.persistence.enabled Enable persistence on Redis™ replicas nodes using Persistent Volume Claims + ## + enabled: true + ## @param replica.persistence.medium Provide a medium for `emptyDir` volumes. 
+ ## + medium: "" + ## @param replica.persistence.path The path the volume will be mounted at on Redis™ replicas containers + ## NOTE: Useful when using different Redis™ images + ## + path: /data + ## @param replica.persistence.subPath The subdirectory of the volume to mount on Redis™ replicas containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param replica.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param replica.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param replica.persistence.size Persistent Volume size + ## + size: 8Gi + ## @param replica.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param replica.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param replica.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## Redis™ replicas service parameters + ## + service: + ## @param replica.service.type Redis™ replicas service type + ## + type: ClusterIP + ## @param replica.service.ports.redis Redis™ replicas service port + ## + ports: + redis: 6379 + ## @param replica.service.nodePorts.redis Node port for Redis™ replicas + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + redis: "" + ## @param replica.service.externalTrafficPolicy Redis™ replicas service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param replica.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param replica.service.clusterIP Redis™ replicas service Cluster IP + ## + clusterIP: "" + ## @param replica.service.loadBalancerIP Redis™ replicas service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param replica.service.loadBalancerSourceRanges Redis™ replicas service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param replica.service.annotations Additional custom annotations for Redis™ replicas service + ## + annotations: {} + ## @param replica.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-replicas pods + ## + terminationGracePeriodSeconds: 30 + ## Autoscaling configuration + ## + autoscaling: + ## @param replica.autoscaling.enabled Enable replica autoscaling settings + ## + enabled: false + ## @param replica.autoscaling.minReplicas Minimum replicas for the pod autoscaling + ## + minReplicas: 1 + ## @param replica.autoscaling.maxReplicas Maximum replicas for the pod autoscaling + ## + maxReplicas: 11 + ## @param replica.autoscaling.targetCPU Percentage of CPU to consider when autoscaling + ## + targetCPU: "" + ## @param replica.autoscaling.targetMemory Percentage of Memory to consider when autoscaling + ## + targetMemory: "" + +## @section Redis™ Sentinel configuration parameters +## + +sentinel: + ## @param sentinel.enabled Use Redis™ Sentinel on Redis™ pods. + ## IMPORTANT: this will disable the master and replicas services and + ## create a single Redis™ service exposing both the Redis and Sentinel ports + ## + enabled: false + ## Bitnami Redis™ Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## @param sentinel.image.registry Redis™ Sentinel image registry + ## @param sentinel.image.repository Redis™ Sentinel image repository + ## @param sentinel.image.tag Redis™ Sentinel image tag (immutable tags are recommended) + ## @param sentinel.image.pullPolicy Redis™ Sentinel image pull policy + ## @param sentinel.image.pullSecrets Redis™ Sentinel image pull secrets + ## @param sentinel.image.debug Enable image debug mode + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + tag: 6.2.6-debian-10-r156 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + ## @param sentinel.masterSet Master set name + ## + masterSet: mymaster + ## @param sentinel.quorum Sentinel Quorum + ## + quorum: 2 + ## @param sentinel.getMasterTimeout Amount of time to allow before get_sentinel_master_info() times out. + ## NOTE: This is directly related to the startupProbes which are configured to run every 10 seconds for a total of 22 failures. If adjusting this value, also adjust the startupProbes. + getMasterTimeout: 220 + ## @param sentinel.automateClusterRecovery Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically fail over to it. + ## This also prevents any new replica from starting until the last remaining replica is elected as master to guarantee that it is the one to be elected by Sentinel, and not a newly started replica with no data. + ## NOTE: This feature requires a "downAfterMilliseconds" value less than or equal to 2000.
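+ ## e.g. (illustrative pairing that satisfies the NOTE above; both values are
+ ## examples, not chart defaults):
+ ## automateClusterRecovery: true
+ ## downAfterMilliseconds: 2000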
+ ## + automateClusterRecovery: false + ## Sentinel timing restrictions + ## @param sentinel.downAfterMilliseconds Timeout for detecting that a Redis™ node is down + ## @param sentinel.failoverTimeout Timeout for performing an election failover + ## + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + ## @param sentinel.parallelSyncs Number of replicas that can be reconfigured in parallel to use the new master after a failover + ## + parallelSyncs: 1 + ## @param sentinel.configuration Configuration for Redis™ Sentinel nodes + ## ref: https://redis.io/topics/sentinel + ## + configuration: "" + ## @param sentinel.command Override default container command (useful when using custom images) + ## + command: [] + ## @param sentinel.args Override default container args (useful when using custom images) + ## + args: [] + ## @param sentinel.preExecCmds Additional commands to run prior to starting Redis™ Sentinel + ## + preExecCmds: [] + ## @param sentinel.extraEnvVars Array with extra environment variables to add to Redis™ Sentinel nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param sentinel.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis™ Sentinel nodes + ## + extraEnvVarsCM: "" + ## @param sentinel.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis™ Sentinel nodes + ## + extraEnvVarsSecret: "" + ## @param sentinel.externalMaster.enabled Use external master for bootstrapping + ## @param sentinel.externalMaster.host External master host to bootstrap from + ## @param sentinel.externalMaster.port Port for Redis service external master host + ## + externalMaster: + enabled: false + host: "" + port: 6379 + ## @param sentinel.containerPorts.sentinel Container port to open on Redis™ Sentinel nodes + ## + containerPorts: + sentinel: 26379 + ## Configure extra options for Redis™ containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param sentinel.startupProbe.enabled Enable startupProbe on Redis™ Sentinel nodes + ## @param sentinel.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param sentinel.startupProbe.periodSeconds Period seconds for startupProbe + ## @param sentinel.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param sentinel.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param sentinel.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 22 + ## @param sentinel.livenessProbe.enabled Enable livenessProbe on Redis™ Sentinel nodes + ## @param sentinel.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param sentinel.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param sentinel.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param sentinel.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param sentinel.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis™ Sentinel nodes + ## @param sentinel.readinessProbe.initialDelaySeconds
Initial delay seconds for readinessProbe + ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param sentinel.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param sentinel.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param sentinel.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param sentinel.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param sentinel.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param sentinel.persistence.enabled Enable persistence on Redis™ sentinel nodes using Persistent Volume Claims (Experimental) + ## + enabled: false + ## @param sentinel.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param sentinel.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param sentinel.persistence.size Persistent Volume size + ## + size: 100Mi + ## @param sentinel.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param sentinel.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param sentinel.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## Redis™ Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sentinel.resources.limits The resources limits for the Redis™ Sentinel containers + ## @param sentinel.resources.requests The requested resources for the Redis™ Sentinel containers + ## + resources: + limits: {} + requests: {} + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param sentinel.containerSecurityContext.enabled Enabled Redis™ Sentinel containers' Security Context + ## @param sentinel.containerSecurityContext.runAsUser Set Redis™ Sentinel containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param sentinel.lifecycleHooks for the Redis™ sentinel container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param sentinel.extraVolumes Optionally specify extra list of additional volumes for the Redis™ Sentinel + ## + extraVolumes: [] + ## @param sentinel.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis™ Sentinel container(s) + ## + extraVolumeMounts: [] + ## Redis™ Sentinel service parameters + ## + service: + ## @param sentinel.service.type Redis™ Sentinel service type + ## + type: ClusterIP + ## @param sentinel.service.ports.redis Redis™ service port for Redis™ + ## @param 
sentinel.service.ports.sentinel Redis™ service port for Redis™ Sentinel + ## + ports: + redis: 6379 + sentinel: 26379 + ## @param sentinel.service.nodePorts.redis Node port for Redis™ + ## @param sentinel.service.nodePorts.sentinel Node port for Sentinel + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## NOTE: By leaving these values blank, they will be generated by ports-configmap + ## If setting manually, please leave at least replica.replicaCount + 1 in between sentinel.service.nodePorts.redis and sentinel.service.nodePorts.sentinel to take into account the ports that will be created while incrementing that base port + ## + nodePorts: + redis: "" + sentinel: "" + ## @param sentinel.service.externalTrafficPolicy Redis™ Sentinel service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param sentinel.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param sentinel.service.clusterIP Redis™ Sentinel service Cluster IP + ## + clusterIP: "" + ## @param sentinel.service.loadBalancerIP Redis™ Sentinel service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param sentinel.service.loadBalancerSourceRanges Redis™ Sentinel service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param sentinel.service.annotations Additional custom annotations for Redis™ Sentinel service + ## + annotations: {} + ## @param sentinel.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-node pods + ## + terminationGracePeriodSeconds: 30 + +## @section Other Parameters +## + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the ports + ## Redis™ is listening on. When true, Redis™ will accept connections from any source + ## (with the correct destination port). 
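+ ## e.g. (illustrative hardened override; assumes every Redis™ client pod
+ ## carries the client label expected by the chart's NetworkPolicy template):
+ ## enabled: true
+ ## allowExternal: false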
+ ## + allowExternal: true + ## @param networkPolicy.extraIngress Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param networkPolicy.extraEgress Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressNSMatchLabels Labels to match to allow traffic from other namespaces + ## @param networkPolicy.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + create: false + ## @param podSecurityPolicy.enabled Enable PodSecurityPolicy's RBAC rules + ## + enabled: false +## RBAC configuration +## +rbac: + ## @param rbac.create Specifies whether RBAC resources should be created + ## + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## ServiceAccount configuration +## +serviceAccount: + ## @param serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Redis™ Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Specifies whether a PodDisruptionBudget should be created + ## + create: false + ## @param pdb.minAvailable Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: "" +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic + ## + enabled: false + ## @param tls.authClients Require clients to authenticate + ## + authClients: true + ## @param tls.autoGenerated Enable autogenerated certificates + ## + autoGenerated: false + ## @param tls.existingSecret The name of the existing secret that contains the TLS certificates + ## + existingSecret: "" + ## @param tls.certificatesSecret DEPRECATED. Use existingSecret instead.
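+ ## e.g. (illustrative; the secret name is a placeholder and must exist in the
+ ## release namespace with the certificate files named below):
+ ## enabled: true
+ ## existingSecret: redis-tls-secret
+ ## certFilename: tls.crt
+ ## certKeyFilename: tls.key
+ ## certCAFilename: ca.crt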
+ ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate Key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## + certCAFilename: "" + ## @param tls.dhParamsFilename File containing DH params (in order to support DH based ciphers) + ## + dhParamsFilename: "" + +## @section Metrics Parameters +## + +metrics: + ## @param metrics.enabled Start a sidecar prometheus exporter to expose Redis™ metrics + ## + enabled: false + ## Bitnami Redis™ Exporter image + ## ref: https://hub.docker.com/r/bitnami/redis-exporter/tags/ + ## @param metrics.image.registry Redis™ Exporter image registry + ## @param metrics.image.repository Redis™ Exporter image repository + ## @param metrics.image.tag Redis™ Exporter image tag (immutable tags are recommended) + ## @param metrics.image.pullPolicy Redis™ Exporter image pull policy + ## @param metrics.image.pullSecrets Redis™ Exporter image pull secrets + ## + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.36.0-debian-10-r5 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.command Override default metrics container init command (useful when using custom images) + ## + command: [] + ## @param metrics.redisTargetHost A way to specify an alternative Redis™ hostname + ## Useful for certificate CN/SAN matching + ## + redisTargetHost: "localhost" + ## @param metrics.extraArgs Extra arguments for Redis™ exporter + ## e.g.: + ## extraArgs: + ## check-keys: myKey,myOtherKey + ## + extraArgs: {} + ## @param metrics.extraEnvVars Array with extra environment variables to add to Redis™ exporter + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param metrics.containerSecurityContext.enabled Enabled Redis™ exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set Redis™ exporter containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the Redis™ metrics sidecar + ## + extraVolumes: [] + ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis™ metrics sidecar + ## + extraVolumeMounts: [] + ## Redis™ exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.resources.limits The resources limits for the Redis™ exporter container + ## @param metrics.resources.requests The requested resources for the Redis™ exporter container + ## + resources: + limits: {} + requests: {} + ## @param metrics.podLabels Extra labels for Redis™ exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param metrics.podAnnotations [object] Annotations for Redis™ exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: +
prometheus.io/scrape: "true" + prometheus.io/port: "9121" + ## Redis™ exporter service parameters + ## + service: + ## @param metrics.service.type Redis™ exporter service type + ## + type: ClusterIP + ## @param metrics.service.port Redis™ exporter service port + ## + port: 9121 + ## @param metrics.service.externalTrafficPolicy Redis™ exporter service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param metrics.service.loadBalancerIP Redis™ exporter service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param metrics.service.loadBalancerSourceRanges Redis™ exporter service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param metrics.service.annotations Additional custom annotations for Redis™ exporter service + ## + annotations: {} + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. + ## + relabellings: [] + ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify the honorLabels option on the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + ## + additionalLabels: {} + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules Custom Prometheus rules + ## e.g: + ## rules: + ## - alert: RedisDown + ## expr: redis_up{service="{{ template "common.names.fullname" .
}}-metrics"} == 0 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: Redis™ instance {{ "{{ $labels.instance }}" }} down + ## description: Redis™ instance {{ "{{ $labels.instance }}" }} is down + ## - alert: RedisMemoryHigh + ## expr: > + ## redis_memory_used_bytes{service="{{ template "common.names.fullname" . }}-metrics"} * 100 + ## / + ## redis_memory_max_bytes{service="{{ template "common.names.fullname" . }}-metrics"} + ## > 90 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: Redis™ instance {{ "{{ $labels.instance }}" }} is using too much memory + ## description: | + ## Redis™ instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + ## - alert: RedisKeyEviction + ## expr: | + ## increase(redis_evicted_keys_total{service="{{ template "common.names.fullname" . }}-metrics"}[5m]) > 0 + ## for: 1s + ## labels: + ## severity: error + ## annotations: + ## summary: Redis™ instance {{ "{{ $labels.instance }}" }} has evicted keys + ## description: | + ## Redis™ instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + ## + rules: [] + +## @section Init Container Parameters +## + +## 'volumePermissions' init container parameters +## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values +## based on the *podSecurityContext/*containerSecurityContext parameters +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` + ## + enabled: false + ## Bitnami Shell image + ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/ + ## @param volumePermissions.image.registry Bitnami Shell image registry + ## @param volumePermissions.image.repository Bitnami Shell image repository + ## @param volumePermissions.image.tag Bitnami Shell image tag (immutable tags are recommended) + ## @param volumePermissions.image.pullPolicy Bitnami Shell image pull policy + ## @param volumePermissions.image.pullSecrets Bitnami Shell image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r367 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits The resources limits for the init container + ## @param volumePermissions.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + ## Init container Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser + ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the + ## data folder to auto-determined user & group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed) + ## + containerSecurityContext: + runAsUser: 0 + +## init-sysctl container parameters +## used to perform sysctl operations that modify kernel settings (sometimes needed to avoid warnings) +## +sysctl: + ## @param sysctl.enabled Enable init container to modify kernel settings + ## + enabled: false + ## Bitnami Shell image + ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/ + ## @param sysctl.image.registry Bitnami Shell image registry + ## @param sysctl.image.repository Bitnami Shell image repository + ## @param sysctl.image.tag Bitnami Shell image tag (immutable tags are recommended) + ## @param sysctl.image.pullPolicy Bitnami Shell image pull policy + ## @param sysctl.image.pullSecrets Bitnami Shell image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r367 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param sysctl.command Override default init-sysctl container command (useful when using custom images) + ## + command: [] + ## @param sysctl.mountHostSys Mount the host `/sys` folder to `/host-sys` + ## + mountHostSys: false + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sysctl.resources.limits The resources limits for the init container + ## @param sysctl.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + +## @section useExternalDNS Parameters +## +## @param useExternalDNS.enabled Enable the annotations and hostname syntax needed for external-dns to work. Note this requires a working installation of `external-dns`. +## @param useExternalDNS.additionalAnnotations Extra annotations to be utilized when `external-dns` is enabled. +## @param useExternalDNS.annotationKey The annotation key utilized when `external-dns` is enabled. +## @param useExternalDNS.suffix The DNS suffix utilized when `external-dns` is enabled. Note that the full name of the release is prepended to the suffix.
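+## An illustrative sketch (an inference from the parameters above, not upstream
+## documentation): with `enabled: true`, the default annotationKey below and a
+## hypothetical `suffix: "example.com"`, services would be annotated roughly as
+## `external-dns.alpha.kubernetes.io/hostname: <release-fullname>.example.com`.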
+## +useExternalDNS: + enabled: false + suffix: "" + annotationKey: external-dns.alpha.kubernetes.io/ + additionalAnnotations: {} diff --git a/ansible/roles/helm_install/files/vault/.gitignore b/ansible/roles/helm_install/files/vault/.gitignore new file mode 100644 index 0000000..99dd863 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/.gitignore @@ -0,0 +1 @@ +dont_delete_me diff --git a/ansible/roles/helm_install/files/vault/.helmignore b/ansible/roles/helm_install/files/vault/.helmignore new file mode 100644 index 0000000..4007e24 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/.helmignore @@ -0,0 +1,28 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.terraform/ +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj + +# CI and test +.circleci/ +.github/ +.gitlab-ci.yml +test/ diff --git a/ansible/roles/helm_install/files/vault/00.old/override-values.yaml_221117 b/ansible/roles/helm_install/files/vault/00.old/override-values.yaml_221117 new file mode 100644 index 0000000..07b6e52 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/00.old/override-values.yaml_221117 @@ -0,0 +1,58 @@ +injector: + tolerations: + - key: "dev/data-kafka" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-kafka + +server: + tolerations: + - key: "dev/data-kafka" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-kafka + + + dataStorage: + enabled: true + size: 1Gi + storageClass: null + + auditStorage: + enabled: false + size: 1Gi + storageClass: null + +ui: + enabled: true + serviceType: "NodePort" + serviceNodePort: 32702 + +csi: + pod: + tolerations: + - key: "dev/data-kafka" + operator: "Exists" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-kafka diff --git a/ansible/roles/helm_install/files/vault/00.old/override-values.yaml_bak b/ansible/roles/helm_install/files/vault/00.old/override-values.yaml_bak new file mode 100644 index 0000000..fcfe83b --- /dev/null +++ b/ansible/roles/helm_install/files/vault/00.old/override-values.yaml_bak @@ -0,0 +1,14 @@ +server: + dataStorage: + enabled: true + # storageClass: openebs-hostpath + size: 1Gi + auditStorage: + enabled: true + # storageClass: openebs-hostpath + size: 1Gi + +ui: + enabled: true + serviceType: "NodePort" + serviceNodePort: 32702 diff --git a/ansible/roles/helm_install/files/vault/CHANGELOG.md b/ansible/roles/helm_install/files/vault/CHANGELOG.md new file mode 100644 index 0000000..df95800 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/CHANGELOG.md @@ -0,0 +1,433 @@ +## Unreleased + +## 0.22.1 (October 26th, 2022) + +Changes: +* `vault` updated to 1.12.0 [GH-803](https://github.com/hashicorp/vault-helm/pull/803) +* `vault-k8s` updated to 1.0.1 [GH-803](https://github.com/hashicorp/vault-helm/pull/803) + +## 0.22.0 (September 8th, 2022) + +Features: +* Add PrometheusOperator support for collecting Vault server metrics. 
[GH-772](https://github.com/hashicorp/vault-helm/pull/772) + +Changes: +* `vault-k8s` to 1.0.0 [GH-784](https://github.com/hashicorp/vault-helm/pull/784) +* Test against Kubernetes 1.25 [GH-784](https://github.com/hashicorp/vault-helm/pull/784) +* `vault` updated to 1.11.3 [GH-785](https://github.com/hashicorp/vault-helm/pull/785) + +## 0.21.0 (August 10th, 2022) + +CHANGES: +* `vault-k8s` updated to 0.17.0. [GH-771](https://github.com/hashicorp/vault-helm/pull/771) +* `vault-csi-provider` updated to 1.2.0 [GH-771](https://github.com/hashicorp/vault-helm/pull/771) +* `vault` updated to 1.11.2 [GH-771](https://github.com/hashicorp/vault-helm/pull/771) +* Start testing against Kubernetes 1.24. [GH-744](https://github.com/hashicorp/vault-helm/pull/744) +* Deprecated `injector.externalVaultAddr`. Added `global.externalVaultAddr`, which applies to both the Injector and the CSI Provider. [GH-745](https://github.com/hashicorp/vault-helm/pull/745) +* CSI Provider pods now set the `VAULT_ADDR` environment variable to either the internal Vault service or the configured external address. [GH-745](https://github.com/hashicorp/vault-helm/pull/745) + +Features: +* server: Add `server.statefulSet.securityContext` to override pod and container `securityContext`. [GH-767](https://github.com/hashicorp/vault-helm/pull/767) +* csi: Add `csi.daemonSet.securityContext` to override pod and container `securityContext`. [GH-767](https://github.com/hashicorp/vault-helm/pull/767) +* injector: Add `injector.securityContext` to override pod and container `securityContext`. [GH-750](https://github.com/hashicorp/vault-helm/pull/750) and [GH-767](https://github.com/hashicorp/vault-helm/pull/767) +* Add `server.service.activeNodePort` and `server.service.standbyNodePort` to specify the `nodePort` for active and standby services. [GH-610](https://github.com/hashicorp/vault-helm/pull/610) +* Support for setting annotations on the injector's serviceAccount [GH-753](https://github.com/hashicorp/vault-helm/pull/753) + +## 0.20.1 (May 25th, 2022) +CHANGES: +* `vault-k8s` updated to 0.16.1 [GH-739](https://github.com/hashicorp/vault-helm/pull/739) + +Improvements: +* Mutating webhook will no longer target the agent injector pod [GH-736](https://github.com/hashicorp/vault-helm/pull/736) + +Bugs: +* `vault` service account is now created even if the server is set to disabled, as per before 0.20.0 [GH-737](https://github.com/hashicorp/vault-helm/pull/737) + +## 0.20.0 (May 16th, 2022) + +CHANGES: +* `global.enabled` now works as documented, that is, setting `global.enabled` to false will disable everything, with individual components able to be turned on individually [GH-703](https://github.com/hashicorp/vault-helm/pull/703) +* Default value of `-` used for injector and server to indicate that they follow `global.enabled`. [GH-703](https://github.com/hashicorp/vault-helm/pull/703) +* Vault default image to 1.10.3 +* CSI provider default image to 1.1.0 +* Vault K8s default image to 0.16.0 +* Earliest Kubernetes version tested is now 1.16 +* Helm 3.6+ now required + +Features: +* Support topologySpreadConstraints in server and injector. 
[GH-652](https://github.com/hashicorp/vault-helm/pull/652) + +Improvements: +* CSI: Set `extraLabels` for daemonset, pods, and service account [GH-690](https://github.com/hashicorp/vault-helm/pull/690) +* Add namespace to injector-leader-elector role, rolebinding and secret [GH-683](https://github.com/hashicorp/vault-helm/pull/683) +* Support policy/v1 PodDisruptionBudget in Kubernetes 1.21+ for server and injector [GH-710](https://github.com/hashicorp/vault-helm/pull/710) +* Make the Cluster Address (CLUSTER_ADDR) configurable [GH-629](https://github.com/hashicorp/vault-helm/pull/709) +* server: Make `publishNotReadyAddresses` configurable for services [GH-694](https://github.com/hashicorp/vault-helm/pull/694) +* server: Allow config to be defined as a YAML object in the values file [GH-684](https://github.com/hashicorp/vault-helm/pull/684) +* Maintain default MutatingWebhookConfiguration values from `v1beta1` [GH-692](https://github.com/hashicorp/vault-helm/pull/692) + +## 0.19.0 (January 20th, 2022) + +CHANGES: +* Vault image default 1.9.2 +* Vault K8s image default 0.14.2 + +Features: +* Added configurable podDisruptionBudget for injector [GH-653](https://github.com/hashicorp/vault-helm/pull/653) +* Make terminationGracePeriodSeconds configurable for server [GH-659](https://github.com/hashicorp/vault-helm/pull/659) +* Added configurable update strategy for injector [GH-661](https://github.com/hashicorp/vault-helm/pull/661) +* csi: ability to set priorityClassName for CSI daemonset pods [GH-670](https://github.com/hashicorp/vault-helm/pull/670) + +Improvements: +* Set the namespace on the OpenShift Route [GH-679](https://github.com/hashicorp/vault-helm/pull/679) +* Add volumes and env vars to helm hook test pod [GH-673](https://github.com/hashicorp/vault-helm/pull/673) +* Make TLS configurable for OpenShift routes [GH-686](https://github.com/hashicorp/vault-helm/pull/686) + +## 0.18.0 (November 17th, 2021) + +CHANGES: +* Removed support for deploying a leader-elector container with the [vault-k8s injector](https://github.com/hashicorp/vault-k8s) injector since vault-k8s now uses an internal mechanism to determine leadership [GH-649](https://github.com/hashicorp/vault-helm/pull/649) +* Vault image default 1.9.0 +* Vault K8s image default 0.14.1 + +Improvements: +* Added templateConfig.staticSecretRenderInterval chart option for the injector [GH-621](https://github.com/hashicorp/vault-helm/pull/621) + +## 0.17.1 (October 25th, 2021) + +Improvements: + * Add option for Ingress PathType [GH-634](https://github.com/hashicorp/vault-helm/pull/634) + +## 0.17.0 (October 21st, 2021) + +KNOWN ISSUES: +* The chart will fail to deploy on Kubernetes 1.19+ with `server.ingress.enabled=true` because no `pathType` is set + +CHANGES: +* Vault image default 1.8.4 +* Vault K8s image default 0.14.0 + +Improvements: +* Support Ingress stable networking API [GH-590](https://github.com/hashicorp/vault-helm/pull/590) +* Support setting the `externalTrafficPolicy` for `LoadBalancer` and `NodePort` service types [GH-626](https://github.com/hashicorp/vault-helm/pull/626) +* Support setting ingressClassName on server Ingress [GH-630](https://github.com/hashicorp/vault-helm/pull/630) + +Bugs: +* Ensure `kubeletRootDir` volume path and mounts are the same when `csi.daemonSet.kubeletRootDir` is overridden [GH-628](https://github.com/hashicorp/vault-helm/pull/628) + +## 0.16.1 (September 29th, 2021) + +CHANGES: +* Vault image default 1.8.3 +* Vault K8s image default 0.13.1 + +## 0.16.0 (September 16th, 2021) + 
+CHANGES: +* Support for deploying a leader-elector container with the [vault-k8s injector](https://github.com/hashicorp/vault-k8s) injector will be removed in version 0.18.0 of this chart since vault-k8s now uses an internal mechanism to determine leadership. To enable the deployment of the leader-elector container for use with vault-k8s 0.12.0 and earlier, set `useContainer=true`. + +Improvements: + * Make CSI provider `hostPaths` configurable via `csi.daemonSet.providersDir` and `csi.daemonSet.kubeletRootDir` [GH-603](https://github.com/hashicorp/vault-helm/pull/603) + * Support vault-k8s internal leader election [GH-568](https://github.com/hashicorp/vault-helm/pull/568) [GH-607](https://github.com/hashicorp/vault-helm/pull/607) + +## 0.15.0 (August 23rd, 2021) + +Improvements: +* Add imagePullSecrets on server test [GH-572](https://github.com/hashicorp/vault-helm/pull/572) +* Add injector.webhookAnnotations chart option [GH-584](https://github.com/hashicorp/vault-helm/pull/584) + +## 0.14.0 (July 28th, 2021) + +Features: +* Added templateConfig.exitOnRetryFailure chart option for the injector [GH-560](https://github.com/hashicorp/vault-helm/pull/560) + +Improvements: +* Support configuring pod tolerations, pod affinity, and node selectors as YAML [GH-565](https://github.com/hashicorp/vault-helm/pull/565) +* Set the default vault image to come from the hashicorp organization [GH-567](https://github.com/hashicorp/vault-helm/pull/567) +* Add support for running the acceptance tests against a local `kind` cluster [GH-567](https://github.com/hashicorp/vault-helm/pull/567) +* Add `server.ingress.activeService` to configure if the ingress should use the active service [GH-570](https://github.com/hashicorp/vault-helm/pull/570) +* Add `server.route.activeService` to configure if the route should use the active service [GH-570](https://github.com/hashicorp/vault-helm/pull/570) +* Support configuring `global.imagePullSecrets` from a string array [GH-576](https://github.com/hashicorp/vault-helm/pull/576) + + +## 0.13.0 (June 17th, 2021) + +Improvements: +* Added a helm test for vault server [GH-531](https://github.com/hashicorp/vault-helm/pull/531) +* Added server.enterpriseLicense option [GH-547](https://github.com/hashicorp/vault-helm/pull/547) +* Added OpenShift overrides [GH-549](https://github.com/hashicorp/vault-helm/pull/549) + +Bugs: +* Fix ui.serviceNodePort schema [GH-537](https://github.com/hashicorp/vault-helm/pull/537) +* Fix server.ha.disruptionBudget.maxUnavailable schema [GH-535](https://github.com/hashicorp/vault-helm/pull/535) +* Added webhook-certs volume mount to sidecar injector [GH-545](https://github.com/hashicorp/vault-helm/pull/545) + +## 0.12.0 (May 25th, 2021) + +Features: +* Pass additional arguments to `vault-csi-provider` using `csi.extraArgs` [GH-526](https://github.com/hashicorp/vault-helm/pull/526) + +Improvements: +* Set chart kubeVersion and added chart-verifier tests [GH-510](https://github.com/hashicorp/vault-helm/pull/510) +* Added values json schema [GH-513](https://github.com/hashicorp/vault-helm/pull/513) +* Ability to set tolerations for CSI daemonset pods [GH-521](https://github.com/hashicorp/vault-helm/pull/521) +* UI target port is now configurable [GH-437](https://github.com/hashicorp/vault-helm/pull/437) + +Bugs: +* CSI: `global.imagePullSecrets` are now also used for CSI daemonset [GH-519](https://github.com/hashicorp/vault-helm/pull/519) + +## 0.11.0 (April 14th, 2021) + +Features: +* Added `server.enabled` to explicitly skip installing a Vault 
server [GH-486](https://github.com/hashicorp/vault-helm/pull/486) +* Injector now supports enabling host network [GH-471](https://github.com/hashicorp/vault-helm/pull/471) +* Injector port is now configurable [GH-489](https://github.com/hashicorp/vault-helm/pull/489) +* Injector Vault Agent resource defaults are now configurable [GH-493](https://github.com/hashicorp/vault-helm/pull/493) +* Extra paths can now be added to the Vault ingress service [GH-460](https://github.com/hashicorp/vault-helm/pull/460) +* Log level and format can now be set directly using `server.logFormat` and `server.logLevel` [GH-488](https://github.com/hashicorp/vault-helm/pull/488) + +Improvements: +* Added `https` name to injector service port [GH-495](https://github.com/hashicorp/vault-helm/pull/495) + +Bugs: +* CSI: Fix ClusterRole name and DaemonSet's service account to properly match deployment name [GH-486](https://github.com/hashicorp/vault-helm/pull/486) + +## 0.10.0 (March 25th, 2021) + +Features: +* Add support for [Vault CSI provider](https://github.com/hashicorp/vault-csi-provider) [GH-461](https://github.com/hashicorp/vault-helm/pull/461) + +Improvements: +* `objectSelector` can now be set on the mutating admission webhook [GH-456](https://github.com/hashicorp/vault-helm/pull/456) + +## 0.9.1 (February 2nd, 2021) + +Bugs: +* Injector: fix labels for default anti-affinity rule [GH-441](https://github.com/hashicorp/vault-helm/pull/441), [GH-442](https://github.com/hashicorp/vault-helm/pull/442) +* Set VAULT_DEV_LISTEN_ADDRESS in dev mode [GH-446](https://github.com/hashicorp/vault-helm/pull/446) + +## 0.9.0 (January 5th, 2021) + +Features: +* Injector now supports configurable number of replicas [GH-436](https://github.com/hashicorp/vault-helm/pull/436) +* Injector now supports auto TLS for multiple replicas using leader elections [GH-436](https://github.com/hashicorp/vault-helm/pull/436) + +Improvements: +* Dev mode now supports `server.extraArgs` [GH-421](https://github.com/hashicorp/vault-helm/pull/421) +* Dev mode root token is now configurable with `server.dev.devRootToken` [GH-415](https://github.com/hashicorp/vault-helm/pull/415) +* ClusterRoleBinding updated to `v1` [GH-395](https://github.com/hashicorp/vault-helm/pull/395) +* MutatingWebhook updated to `v1` [GH-408](https://github.com/hashicorp/vault-helm/pull/408) +* Injector service now supports `injector.service.annotations` [425](https://github.com/hashicorp/vault-helm/pull/425) +* Injector now supports `injector.extraLabels` [428](https://github.com/hashicorp/vault-helm/pull/428) +* Added `allowPrivilegeEscalation: false` to Vault and Injector containers [429](https://github.com/hashicorp/vault-helm/pull/429) +* Network Policy now supports `server.networkPolicy.egress` [389](https://github.com/hashicorp/vault-helm/pull/389) + +## 0.8.0 (October 20th, 2020) + +Improvements: +* Make server NetworkPolicy independent of OpenShift [GH-381](https://github.com/hashicorp/vault-helm/pull/381) +* Added configurables for all probe values [GH-387](https://github.com/hashicorp/vault-helm/pull/387) +* MountPath for audit and data storage is now configurable [GH-393](https://github.com/hashicorp/vault-helm/pull/393) +* Annotations can now be added to the Injector pods [GH-394](https://github.com/hashicorp/vault-helm/pull/394) +* The injector can now be configured with a failurePolicy [GH-400](https://github.com/hashicorp/vault-helm/pull/400) +* Added additional environment variables for rendering within Vault config 
[GH-398](https://github.com/hashicorp/vault-helm/pull/398) + * Service account for Vault K8s auth is automatically created when `injector.externalVaultAddr` is set [GH-392](https://github.com/hashicorp/vault-helm/pull/392) + +Bugs: +* Fixed install output using Helm V2 command [GH-378](https://github.com/hashicorp/vault-helm/pull/378) + +## 0.7.0 (August 24th, 2020) + +Features: +* Added `volumes` and `volumeMounts` for mounting _any_ type of volume [GH-314](https://github.com/hashicorp/vault-helm/pull/314). +* Added configurable to enable prometheus telemetry exporter for Vault Agent Injector [GH-372](https://github.com/hashicorp/vault-helm/pull/372) + +Improvements: +* Added `defaultMode` configurable to `extraVolumes` [GH-321](https://github.com/hashicorp/vault-helm/pull/321) +* Option to install and use PodSecurityPolicy's for vault server and injector [GH-177](https://github.com/hashicorp/vault-helm/pull/177) +* `VAULT_API_ADDR` is now configurable [GH-290](https://github.com/hashicorp/vault-helm/pull/290) +* Removed deprecated tolerate unready endpoint annotations [GH-363](https://github.com/hashicorp/vault-helm/pull/363) +* Add an option to set annotations on the StatefulSet [GH-199](https://github.com/hashicorp/vault-helm/pull/199) +* Make the vault server serviceAccount name a configuration option [GH-367](https://github.com/hashicorp/vault-helm/pull/367) +* Removed annotation restriction from `dev` mode [GH-371](https://github.com/hashicorp/vault-helm/pull/371) +* Add an option to set annotations on PVCs [GH-364](https://github.com/hashicorp/vault-helm/pull/364) +* Added service configurables for UI [GH-285](https://github.com/hashicorp/vault-helm/pull/285) + +Bugs: +* Fix python dependency in test image [GH-337](https://github.com/hashicorp/vault-helm/pull/337) +* Fix caBundle not being quoted causing validation issues with Helm 3 [GH-352](https://github.com/hashicorp/vault-helm/pull/352) +* Fix injector network policy being rendered when injector is not enabled [GH-358](https://github.com/hashicorp/vault-helm/pull/358) + +## 0.6.0 (June 3rd, 2020) + +Features: +* Added `extraInitContainers` to define init containers for the Vault cluster [GH-258](https://github.com/hashicorp/vault-helm/pull/258) +* Added `postStart` lifecycle hook allowing users to configure commands to run on the Vault pods after they're ready [GH-315](https://github.com/hashicorp/vault-helm/pull/315) +* Beta: Added OpenShift support [GH-319](https://github.com/hashicorp/vault-helm/pull/319) + +Improvements: +* Server configs can now be defined in YAML.
Multi-line string configs are still compatible [GH-213](https://github.com/hashicorp/vault-helm/pull/213) +* Removed IPC_LOCK privileges since swap is disabled on containers [[GH-198](https://github.com/hashicorp/vault-helm/pull/198)] +* Use port names that map to vault.scheme [[GH-223](https://github.com/hashicorp/vault-helm/pull/223)] +* Allow both yaml and multi-line string annotations [[GH-272](https://github.com/hashicorp/vault-helm/pull/272)] +* Added configurable to set the Raft node name to hostname [[GH-269](https://github.com/hashicorp/vault-helm/pull/269)] +* Support setting priorityClassName on pods [[GH-282](https://github.com/hashicorp/vault-helm/pull/282)] +* Added support for ingress apiVersion `networking.k8s.io/v1beta1` [[GH-310](https://github.com/hashicorp/vault-helm/pull/310)] +* Added configurable to change service type for the HA active service [GH-317](https://github.com/hashicorp/vault-helm/pull/317) + +Bugs: +* Fixed default ingress path [[GH-224](https://github.com/hashicorp/vault-helm/pull/224)] +* Fixed annotations for HA standby/active services [[GH-268](https://github.com/hashicorp/vault-helm/pull/268)] +* Updated some value defaults to match their use in templates [[GH-309](https://github.com/hashicorp/vault-helm/pull/309)] +* Use active service on ingress when ha [[GH-270](https://github.com/hashicorp/vault-helm/pull/270)] +* Fixed bug where pull secrets weren't being used for injector image [GH-298](https://github.com/hashicorp/vault-helm/pull/298) + +## 0.5.0 (April 9th, 2020) + +Features: + +* Added Raft support for HA mode [[GH-228](https://github.com/hashicorp/vault-helm/pull/229)] +* Now supports Vault Enterprise [[GH-250](https://github.com/hashicorp/vault-helm/pull/250)] +* Added K8s Service Registration for HA modes [[GH-250](https://github.com/hashicorp/vault-helm/pull/250)] + +* Option to set `AGENT_INJECT_VAULT_AUTH_PATH` for the injector [[GH-185](https://github.com/hashicorp/vault-helm/pull/185)] +* Added environment variables for logging and revocation on Vault Agent Injector [[GH-219](https://github.com/hashicorp/vault-helm/pull/219)] +* Option to set environment variables for the injector deployment [[GH-232](https://github.com/hashicorp/vault-helm/pull/232)] +* Added affinity, tolerations, and nodeSelector options for the injector deployment [[GH-234](https://github.com/hashicorp/vault-helm/pull/234)] +* Made all annotations multi-line strings [[GH-227](https://github.com/hashicorp/vault-helm/pull/227)] + +## 0.4.0 (February 21st, 2020) + +Improvements: + +* Allow process namespace sharing between Vault and sidecar containers [[GH-174](https://github.com/hashicorp/vault-helm/pull/174)] +* Added configurable to change updateStrategy [[GH-172](https://github.com/hashicorp/vault-helm/pull/172)] +* Added sleep in the preStop lifecycle step [[GH-188](https://github.com/hashicorp/vault-helm/pull/188)] +* Updated chart and tests to Helm 3 [[GH-195](https://github.com/hashicorp/vault-helm/pull/195)] +* Adds Values.injector.externalVaultAddr to use the injector with an external vault [[GH-207](https://github.com/hashicorp/vault-helm/pull/207)] + +Bugs: + +* Fix bug where Vault lifecycle was appended after extra containers. 
[[GH-179](https://github.com/hashicorp/vault-helm/pull/179)] + +## 0.3.3 (January 14th, 2020) + +Security: + +* Added `server.extraArgs` to allow loading of additional Vault configurations containing sensitive settings [GH-175](https://github.com/hashicorp/vault-helm/issues/175) + +Bugs: + +* Fixed injection bug where wrong environment variables were being used for manually mounted TLS files + +## 0.3.2 (January 8th, 2020) + +Bugs: + +* Fixed injection bug where TLS Skip Verify was true by default [VK8S-35] + +## 0.3.1 (January 2nd, 2020) + +Bugs: + +* Fixed injection bug causing kube-system pods to be rejected [VK8S-14] + +## 0.3.0 (December 19th, 2019) + +Features: + +* Extra containers can now be added to the Vault pods +* Added configurability of pod probes +* Added Vault Agent Injector + +Improvements: + +* Moved `global.image` to `server.image` +* Changed UI service template to route pods that aren't ready via `publishNotReadyAddresses: true` +* Added better HTTP/HTTPS scheme support to http probes +* Added configurable node port for Vault service +* `server.authDelegator` is now enabled by default + +Bugs: + +* Fixed upgrade bug by removing chart label which contained the version +* Fixed typo on `serviceAccount` (was `serviceaccount`) +* Fixed readiness/liveness HTTP probe default to accept standbys + +## 0.2.1 (November 12th, 2019) + +Bugs: + +* Removed `readOnlyRootFilesystem` causing issues when validating deployments + +## 0.2.0 (October 29th, 2019) + +Features: + +* Added load balancer support +* Added ingress support +* Added configurable for service types (ClusterIP, NodePort, LoadBalancer, etc) +* Removed root requirements, now runs as Vault user + +Improvements: + +* Added namespace value to all rendered objects +* Made ports configurable in services +* Added the ability to add custom annotations to services +* Added docker image for running bats tests in CircleCI +* Removed restrictions around `dev` mode such as annotations +* `readOnlyRootFilesystem` is now configurable +* Image Pull Policy is now configurable + +Bugs: + +* Fixed selector bugs related to Helm label updates (services, affinities, and pod disruption) +* Fixed bug where audit storage was not being mounted in HA mode +* Fixed bug where Vault pod wasn't receiving SIGTERM signals + + +## 0.1.2 (August 22nd, 2019) + +Features: + +* Added `extraSecretEnvironmentVars` to allow users to mount secrets as + environment variables +* Added `tlsDisable` configurable to change HTTP protocols from HTTP/HTTPS + depending on the value +* Added `serviceNodePort` to configure a NodePort value when setting `serviceType` + to "NodePort" + +Improvements: + +* Changed UI port to 8200 for better HTTP protocol support +* Added `path` to `extraVolumes` to define where the volume should be + mounted. Defaults to `/vault/userconfig` +* Upgraded Vault to 1.2.2 + +Bugs: + +* Fixed bug where upgrade would fail because immutable labels were being + changed (Helm Version label) +* Fixed bug where UI service used wrong selector after updating helm labels +* Added `VAULT_API_ADDR` env to Vault pod to fix a bug where Vault thinks + Consul is the active node +* Removed `step-down` preStop since it requires authentication.
Shutdown signal + sent by Kube acts similar to `step-down` + + +## 0.1.1 (August 7th, 2019) + +Features: + +* Added `authDelegator` Cluster Role Binding to Vault service account for + bootstrapping Kube auth method + +Improvements: + +* Added `server.service.clusterIP` to `values.yml` so users can toggle + the Vault service to headless by using the value `None`. +* Upgraded Vault to 1.2.1 + +## 0.1.0 (August 6th, 2019) + +Initial release diff --git a/ansible/roles/helm_install/files/vault/CONTRIBUTING.md b/ansible/roles/helm_install/files/vault/CONTRIBUTING.md new file mode 100644 index 0000000..ad31ac9 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/CONTRIBUTING.md @@ -0,0 +1,247 @@ +# Contributing to Vault Helm + +**Please note:** We take Vault's security and our users' trust very seriously. +If you believe you have found a security issue in Vault, please responsibly +disclose by contacting us at security@hashicorp.com. + +**First:** if you're unsure or afraid of _anything_, just ask or submit the +issue or pull request anyways. You won't be yelled at for giving it your best +effort. The worst that can happen is that you'll be politely asked to change +something. We appreciate any sort of contributions, and don't want a wall of +rules to get in the way of that. + +That said, if you want to ensure that a pull request is likely to be merged, +talk to us! You can find out our thoughts and ensure that your contribution +won't clash or be obviated by Vault's normal direction. A great way to do this +is via the [Vault Discussion Forum][1]. + +This document will cover what we're looking for in terms of reporting issues. +By addressing all the points we're looking for, it raises the chances we can +quickly merge or address your contributions. + +[1]: https://discuss.hashicorp.com/c/vault + +## Issues + +### Reporting an Issue + +* Make sure you test against the latest released version. It is possible + we already fixed the bug you're experiencing. Even better is if you can test + against `main`, as bugs are fixed regularly but new versions are only + released every few months. + +* Provide steps to reproduce the issue, and if possible include the expected + results as well as the actual results. Please provide text, not screen shots! + +* Respond as promptly as possible to any questions made by the Vault + team to your issue. Stale issues will be closed periodically. + +### Issue Lifecycle + +1. The issue is reported. + +2. The issue is verified and categorized by a Vault Helm collaborator. + Categorization is done via tags. For example, bugs are marked as "bugs". + +3. Unless it is critical, the issue may be left for a period of time (sometimes + many weeks), giving outside contributors -- maybe you!? -- a chance to + address the issue. + +4. The issue is addressed in a pull request or commit. The issue will be + referenced in the commit message so that the code that fixes it is clearly + linked. + +5. The issue is closed. Sometimes, valid issues will be closed to keep + the issue tracker clean. The issue is still indexed and available for + future viewers, or can be re-opened if necessary. + +## Testing + +The Helm chart ships with both unit and acceptance tests. + +The unit tests don't require any active Kubernetes cluster and complete +very quickly. These should be used for fast feedback during development. +The acceptance tests require a Kubernetes cluster with a configured `kubectl`. 
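+A quick sanity check before running the acceptance tests (illustrative
+commands, not part of the chart's own tooling):
+
+```shell
+# confirm kubectl is pointed at a reachable cluster (required for acceptance tests)
+kubectl cluster-info
+
+# confirm bats is available for local test runs
+bats --version
+```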
+ +### Test Using Docker Container + +The following are the instructions for running bats tests using a Docker container. + +#### Prerequisites + +* Docker installed +* `vault-helm` checked out locally + +#### Test + +**Note:** the following commands should be run from the `vault-helm` directory. + +First, build the Docker image for running the tests: + +```shell +docker build -f ${PWD}/test/docker/Test.dockerfile ${PWD}/test/docker/ -t vault-helm-test +``` +Next, execute the tests with the following commands: +```shell +docker run -it --rm -v "${PWD}:/test" vault-helm-test bats /test/test/unit +``` +It's possible to only run specific bats tests using regular expressions. +For example, the following will run only tests with "injector" in the name: +```shell +docker run -it --rm -v "${PWD}:/test" vault-helm-test bats /test/test/unit -f "injector" +``` + +### Test Manually +The following are the instructions for running bats tests on your workstation. +#### Prerequisites +* [Bats](https://github.com/bats-core/bats-core) + ```bash + brew install bats-core + ``` +* [yq](https://pypi.org/project/yq/) + ```bash + brew install python-yq + ``` +* [helm](https://helm.sh) + ```bash + brew install kubernetes-helm + ``` + +#### Test + +To run the unit tests: + + bats ./test/unit + +To run the acceptance tests: + + bats ./test/acceptance + +If the acceptance tests fail, deployed resources in the Kubernetes cluster +may not be properly cleaned up. We recommend recycling the Kubernetes cluster to +start from a clean slate. + +**Note:** There is a Terraform configuration in the +[`test/terraform/`](https://github.com/hashicorp/vault-helm/tree/main/test/terraform) directory +that can be used to quickly bring up a GKE cluster and configure +`kubectl` and `helm` locally. This can be used to quickly spin up a test +cluster for acceptance tests. Unit tests _do not_ require a running Kubernetes +cluster. + +### Writing Unit Tests + +Changes to the Helm chart should be accompanied by appropriate unit tests. + +#### Formatting + +- Put tests in the test file in the same order as the variables appear in the `values.yaml`. +- Start tests for a chart value with a header that says what is being tested, like this: + ``` + #-------------------------------------------------------------------- + # annotations + ``` + +- Name the test based on what it's testing in the following format (this will be its first line): + ``` + @test "
    : " { + ``` + + When adding tests to an existing file, the first section will be the same as the other tests in the file. + +#### Test Details + +[Bats](https://github.com/bats-core/bats-core) provides a way to run commands in a shell and inspect the output in an automated way. +In all of the tests in this repo, the base command being run is [helm template](https://docs.helm.sh/helm/#helm-template) which turns the templated files into straight yaml output. +In this way, we're able to test that the various conditionals in the templates render as we would expect. + +Each test defines the files that should be rendered using the `--show-only` flag, then it might adjust chart values by adding `--set` flags as well. +The output from this `helm template` command is then piped to [yq](https://pypi.org/project/yq/). +`yq` allows us to pull out just the information we're interested in, either by referencing its position in the yaml file directly or giving information about it (like its length). +The `-r` flag can be used with `yq` to return a raw string instead of a quoted one which is especially useful when looking for an exact match. + +The test passes or fails based on the conditional at the end that is in square brackets, which is a comparison of our expected value and the output of `helm template` piped to `yq`. + +The `| tee /dev/stderr ` pieces direct any terminal output of the `helm template` and `yq` commands to stderr so that it doesn't interfere with `bats`. + +#### Test Examples + +Here are some examples of common test patterns: + +- Check that a value is disabled by default + + ``` + @test "ui/Service: no type by default" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/ui-service.yaml \ + . | tee /dev/stderr | + yq -r '.spec.type' | tee /dev/stderr) + [ "${actual}" = "null" ] + } + ``` + + In this example, nothing is changed from the default templates (no `--set` flags), then we use `yq` to retrieve the value we're checking, `.spec.type`. + This output is then compared against our expected value (`null` in this case) in the assertion `[ "${actual}" = "null" ]`. + + +- Check that a template value is rendered to a specific value + ``` + @test "ui/Service: specified type" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/ui-service.yaml \ + --set 'ui.serviceType=LoadBalancer' \ + . | tee /dev/stderr | + yq -r '.spec.type' | tee /dev/stderr) + [ "${actual}" = "LoadBalancer" ] + } + ``` + + This is very similar to the last example, except we've changed a default value with the `--set` flag and correspondingly changed the expected value. + +- Check that a template value contains several values + ``` + @test "server/standalone-StatefulSet: custom resources" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/server-statefulset.yaml \ + --set 'server.standalone.enabled=true' \ + --set 'server.resources.requests.memory=256Mi' \ + --set 'server.resources.requests.cpu=250m' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources.requests.memory' | tee /dev/stderr) + [ "${actual}" = "256Mi" ] + + local actual=$(helm template \ + --show-only templates/server-statefulset.yaml \ + --set 'server.standalone.enabled=true' \ + --set 'server.resources.limits.memory=256Mi' \ + --set 'server.resources.limits.cpu=250m' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources.limits.memory' | tee /dev/stderr) + [ "${actual}" = "256Mi" ] + ``` + + *Note:* If testing more than two conditions, it would be good to separate the `helm template` part of the command from the `yq` sections to reduce redundant work. + +- Check that an entire template file is not rendered + ``` + @test "syncCatalog/Deployment: disabled by default" { + cd `chart_dir` + local actual=$( (helm template \ + --show-only templates/server-statefulset.yaml \ + --set 'global.enabled=false' \ + . || echo "---") | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] + } + ``` + Here we check the length of the command output to see if anything is rendered. + This style can easily be switched to check that a file is rendered instead. + +## Contributor License Agreement + +We require that all contributors sign our Contributor License Agreement ("CLA") +before we can accept the contribution. + +[Learn more about why HashiCorp requires a CLA and what the CLA includes](https://www.hashicorp.com/cla) diff --git a/ansible/roles/helm_install/files/vault/Chart.yaml b/ansible/roles/helm_install/files/vault/Chart.yaml new file mode 100644 index 0000000..ac1dcec --- /dev/null +++ b/ansible/roles/helm_install/files/vault/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +appVersion: 1.12.0 +description: Official HashiCorp Vault Chart +home: https://www.vaultproject.io +icon: https://github.com/hashicorp/vault/raw/f22d202cde2018f9455dec755118a9b84586e082/Vault_PrimaryLogo_Black.png +keywords: +- vault +- security +- encryption +- secrets +- management +- automation +- infrastructure +kubeVersion: '>= 1.16.0-0' +name: vault +sources: +- https://github.com/hashicorp/vault +- https://github.com/hashicorp/vault-helm +- https://github.com/hashicorp/vault-k8s +- https://github.com/hashicorp/vault-csi-provider +version: 0.22.1 diff --git a/ansible/roles/helm_install/files/vault/LICENSE b/ansible/roles/helm_install/files/vault/LICENSE new file mode 100644 index 0000000..74f38c0 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/LICENSE @@ -0,0 +1,355 @@ +Copyright (c) 2018 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9.
“Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. 
You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/ansible/roles/helm_install/files/vault/Makefile b/ansible/roles/helm_install/files/vault/Makefile new file mode 100644 index 0000000..e423f35 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/Makefile @@ -0,0 +1,101 @@ +TEST_IMAGE?=vault-helm-test +GOOGLE_CREDENTIALS?=vault-helm-test.json +CLOUDSDK_CORE_PROJECT?=vault-helm-dev-246514 +# set to run a single test - e.g acceptance/server-ha-enterprise-dr.bats +ACCEPTANCE_TESTS?=acceptance + +# filter bats unit tests to run. +UNIT_TESTS_FILTER?='.*' + +# set to 'true' to run acceptance tests locally in a kind cluster +LOCAL_ACCEPTANCE_TESTS?=false + +# kind cluster name +KIND_CLUSTER_NAME?=vault-helm + +# kind k8s version +KIND_K8S_VERSION?=v1.25.0 + +# Generate json schema for chart values. See test/README.md for more details. +values-schema: + helm schema-gen values.yaml > values.schema.json + +test-image: + @docker build --rm -t $(TEST_IMAGE) -f $(CURDIR)/test/docker/Test.dockerfile $(CURDIR) + +test-unit: + @docker run --rm -it -v ${PWD}:/helm-test $(TEST_IMAGE) bats -f $(UNIT_TESTS_FILTER) /helm-test/test/unit + +test-bats: test-unit test-acceptance + +test: test-image test-bats + +# run acceptance tests on GKE +# set google project/credential vars above +test-acceptance: +ifeq ($(LOCAL_ACCEPTANCE_TESTS),true) + make setup-kind acceptance +else + @docker run -it -v ${PWD}:/helm-test \ + -e GOOGLE_CREDENTIALS=${GOOGLE_CREDENTIALS} \ + -e CLOUDSDK_CORE_PROJECT=${CLOUDSDK_CORE_PROJECT} \ + -e KUBECONFIG=/helm-test/.kube/config \ + -e VAULT_LICENSE_CI=${VAULT_LICENSE_CI} \ + -w /helm-test \ + $(TEST_IMAGE) \ + make acceptance +endif + +# destroy GKE cluster using terraform +test-destroy: + @docker run -it -v ${PWD}:/helm-test \ + -e GOOGLE_CREDENTIALS=${GOOGLE_CREDENTIALS} \ + -e CLOUDSDK_CORE_PROJECT=${CLOUDSDK_CORE_PROJECT} \ + -w /helm-test \ + $(TEST_IMAGE) \ + make destroy-cluster + +# provision GKE cluster using terraform +test-provision: + @docker run -it -v ${PWD}:/helm-test \ + -e GOOGLE_CREDENTIALS=${GOOGLE_CREDENTIALS} \ + -e CLOUDSDK_CORE_PROJECT=${CLOUDSDK_CORE_PROJECT} \ + -e KUBECONFIG=/helm-test/.kube/config \ + -w /helm-test \ + $(TEST_IMAGE) \ + make provision-cluster + +# this target is for running the acceptance tests +# it is run in the docker container above when the test-acceptance target is invoked +acceptance: +ifneq ($(LOCAL_ACCEPTANCE_TESTS),true) + gcloud auth activate-service-account --key-file=${GOOGLE_CREDENTIALS} +endif + bats --tap --timing test/${ACCEPTANCE_TESTS} + +# this target is for provisioning the GKE cluster +# it is run in the docker container above when the test-provision target is invoked +provision-cluster: + gcloud auth activate-service-account --key-file=${GOOGLE_CREDENTIALS} + terraform init test/terraform + terraform apply -var project=${CLOUDSDK_CORE_PROJECT} -var init_cli=true -auto-approve test/terraform + +# this target is for removing the GKE cluster +# it is run in the docker container above when the test-destroy target is invoked +destroy-cluster: + terraform destroy -auto-approve + +# create a kind cluster for running the acceptance tests locally +setup-kind: + kind get clusters | grep -q "^${KIND_CLUSTER_NAME}$$" || \ + kind create cluster \ + --image kindest/node:${KIND_K8S_VERSION} \ + --name ${KIND_CLUSTER_NAME} \ 
+		--config $(CURDIR)/test/kind/config.yaml
+	kubectl config use-context kind-${KIND_CLUSTER_NAME}
+
+# delete the kind cluster
+delete-kind:
+	kind delete cluster --name ${KIND_CLUSTER_NAME} || :
+
+.PHONY: values-schema test-image test-unit test-bats test test-acceptance test-destroy test-provision acceptance provision-cluster destroy-cluster setup-kind delete-kind
diff --git a/ansible/roles/helm_install/files/vault/README.MD b/ansible/roles/helm_install/files/vault/README.MD
new file mode 100644
index 0000000..816e4e6
--- /dev/null
+++ b/ansible/roles/helm_install/files/vault/README.MD
@@ -0,0 +1,127 @@
+## Review the contents of override-values.yaml
+#
+```
+# Enter the access_key and secret_key of the user_vault user.
+# Enter the key id of the aws kms vault-auto-unseal key.
+  seal "awskms" {
+    region     = "ap-northeast-2"
+    access_key = <user_vault access_key>
+    secret_key = <user_vault secret_key>
+    kms_key_id = <aws kms vault-auto-unseal key id>
+  }
+
+```
+## Install the vault server
+```
+helm install vault-server -n dsk-middle -f override-values.yaml .
+```
+
+## Verify the vault server was created
+```
+kubectl get pods -n dsk-middle
+```
+
+## Initialize the vault server
+```
+kubectl exec -it -n dsk-middle vault-server-0 -- vault operator init
+```
+The key values printed by the command above must be saved to a file and kept in a safe place.\
+They are required to unseal the vault server, log in to the UI, and so on.
+
+## Unseal the vault server. Any 3 of the 5 unseal keys are required
+```
+kubectl exec -it -n dsk-middle vault-server-0 -- vault operator unseal
+```
+### Enter an unseal key
+```
+kubectl exec -it -n dsk-middle vault-server-0 -- vault operator unseal
+```
+### Enter an unseal key
+```
+kubectl exec -it -n dsk-middle vault-server-0 -- vault operator unseal
+```
+### Enter an unseal key
+
+## Log in to the vault server
+```
+kubectl exec -it -n dsk-middle vault-server-0 -- vault login
+```
+### Enter the Initial Root Token
+
+## Enable a vault secrets engine. The kv (key-value) engine is used
+```
+kubectl exec -it -n dsk-middle vault-server-0 -- vault secrets enable -version=2 -path=tls kv
+```
+## Verify the secrets engine is enabled
+```
+kubectl exec -it -n dsk-middle vault-server-0 -- vault secrets list
+```
+
+## Enable approle
+```
+kubectl exec -it -n dsk-middle vault-server-0 -- vault auth enable approle
+```
+## Verify approle is enabled
+```
+kubectl exec -it -n dsk-middle vault-server-0 -- vault auth list
+```
+
+## Create a policy (defines the permissions used to access secrets)
+```
+kubectl exec -it -n dsk-middle vault-server-0 -- vault policy write datasaker -<&1
+          livenessProbe:
+            httpGet:
+              path: /health/ready
+              port: {{ .Values.injector.port }}
+              scheme: HTTPS
+            failureThreshold: 2
+            initialDelaySeconds: 5
+            periodSeconds: 2
+            successThreshold: 1
+            timeoutSeconds: 5
+          readinessProbe:
+            httpGet:
+              path: /health/ready
+              port: {{ .Values.injector.port }}
+              scheme: HTTPS
+            failureThreshold: 2
+            initialDelaySeconds: 5
+            periodSeconds: 2
+            successThreshold: 1
+            timeoutSeconds: 5
+{{- if .Values.injector.certs.secretName }}
+          volumeMounts:
+            - name: webhook-certs
+              mountPath: /etc/webhook/certs
+              readOnly: true
+{{- end }}
+{{- if .Values.injector.certs.secretName }}
+      volumes:
+        - name: webhook-certs
+          secret:
+            secretName: "{{ .Values.injector.certs.secretName }}"
+{{- end }}
+      {{- include "imagePullSecrets" . 
| nindent 6 }} +{{ end }} diff --git a/ansible/roles/helm_install/files/vault/templates/injector-disruptionbudget.yaml b/ansible/roles/helm_install/files/vault/templates/injector-disruptionbudget.yaml new file mode 100644 index 0000000..b44fd73 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/injector-disruptionbudget.yaml @@ -0,0 +1,20 @@ +{{- if .Values.injector.podDisruptionBudget }} +apiVersion: {{ ge .Capabilities.KubeVersion.Minor "21" | ternary "policy/v1" "policy/v1beta1" }} +kind: PodDisruptionBudget +metadata: + name: {{ template "vault.fullname" . }}-agent-injector + namespace: {{ .Release.Namespace }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + component: webhook +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "vault.name" . }}-agent-injector + app.kubernetes.io/instance: {{ .Release.Name }} + component: webhook + {{- toYaml .Values.injector.podDisruptionBudget | nindent 2 }} +{{- end -}} diff --git a/ansible/roles/helm_install/files/vault/templates/injector-mutating-webhook.yaml b/ansible/roles/helm_install/files/vault/templates/injector-mutating-webhook.yaml new file mode 100644 index 0000000..3d3fd36 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/injector-mutating-webhook.yaml @@ -0,0 +1,39 @@ +{{- template "vault.injectorEnabled" . -}} +{{- if .injectorEnabled -}} +{{- if .Capabilities.APIVersions.Has "admissionregistration.k8s.io/v1" }} +apiVersion: admissionregistration.k8s.io/v1 +{{- else }} +apiVersion: admissionregistration.k8s.io/v1beta1 +{{- end }} +kind: MutatingWebhookConfiguration +metadata: + name: {{ template "vault.fullname" . }}-agent-injector-cfg + labels: + app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- template "injector.webhookAnnotations" . }} +webhooks: + - name: vault.hashicorp.com + failurePolicy: {{ ((.Values.injector.webhook)).failurePolicy | default .Values.injector.failurePolicy }} + matchPolicy: {{ ((.Values.injector.webhook)).matchPolicy | default "Exact" }} + sideEffects: None + timeoutSeconds: {{ ((.Values.injector.webhook)).timeoutSeconds | default "30" }} + admissionReviewVersions: ["v1", "v1beta1"] + clientConfig: + service: + name: {{ template "vault.fullname" . }}-agent-injector-svc + namespace: {{ .Release.Namespace }} + path: "/mutate" + caBundle: {{ .Values.injector.certs.caBundle | quote }} + rules: + - operations: ["CREATE", "UPDATE"] + apiGroups: [""] + apiVersions: ["v1"] + resources: ["pods"] +{{- if or (.Values.injector.namespaceSelector) (((.Values.injector.webhook)).namespaceSelector) }} + namespaceSelector: +{{ toYaml (((.Values.injector.webhook)).namespaceSelector | default .Values.injector.namespaceSelector) | indent 6}} +{{ end }} +{{- template "injector.objectSelector" . -}} +{{ end }} diff --git a/ansible/roles/helm_install/files/vault/templates/injector-network-policy.yaml b/ansible/roles/helm_install/files/vault/templates/injector-network-policy.yaml new file mode 100644 index 0000000..68892d2 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/injector-network-policy.yaml @@ -0,0 +1,24 @@ +{{- template "vault.injectorEnabled" . 
-}} +{{- if .injectorEnabled -}} +{{- if eq (.Values.global.openshift | toString) "true" }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "vault.fullname" . }}-agent-injector + labels: + app.kubernetes.io/name: {{ template "vault.name" . }}-agent-injector + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + podSelector: + matchLabels: + app.kubernetes.io/name: {{ template "vault.name" . }}-agent-injector + app.kubernetes.io/instance: {{ .Release.Name }} + component: webhook + ingress: + - from: + - namespaceSelector: {} + ports: + - port: 8080 + protocol: TCP +{{ end }} +{{ end }} diff --git a/ansible/roles/helm_install/files/vault/templates/injector-psp-role.yaml b/ansible/roles/helm_install/files/vault/templates/injector-psp-role.yaml new file mode 100644 index 0000000..5d23c75 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/injector-psp-role.yaml @@ -0,0 +1,20 @@ +{{- template "vault.injectorEnabled" . -}} +{{- if .injectorEnabled -}} +{{- if eq (.Values.global.psp.enable | toString) "true" }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "vault.fullname" . }}-agent-injector-psp + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "vault.fullname" . }}-agent-injector +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/vault/templates/injector-psp-rolebinding.yaml b/ansible/roles/helm_install/files/vault/templates/injector-psp-rolebinding.yaml new file mode 100644 index 0000000..4f6b0a8 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/injector-psp-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- template "vault.injectorEnabled" . -}} +{{- if .injectorEnabled -}} +{{- if eq (.Values.global.psp.enable | toString) "true" }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "vault.fullname" . }}-agent-injector-psp + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +roleRef: + kind: Role + name: {{ template "vault.fullname" . }}-agent-injector-psp + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ template "vault.fullname" . }}-agent-injector +{{- end }} +{{- end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/vault/templates/injector-psp.yaml b/ansible/roles/helm_install/files/vault/templates/injector-psp.yaml new file mode 100644 index 0000000..1eee2fc --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/injector-psp.yaml @@ -0,0 +1,46 @@ +{{- template "vault.injectorEnabled" . -}} +{{- if .injectorEnabled -}} +{{- if eq (.Values.global.psp.enable | toString) "true" }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "vault.fullname" . }}-agent-injector + labels: + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- template "vault.psp.annotations" . }} +spec: + privileged: false + # Required to prevent escalations to root. 
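+  # The injector only mutates pod specs over the network, so it also runs
+  # without host network, host IPC, or host PID access (see below), and only
+  # the ephemeral volume types listed here are permitted.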
+ allowPrivilegeEscalation: false + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: MustRunAsNonRoot + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: MustRunAs + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} +{{- end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/vault/templates/injector-role.yaml b/ansible/roles/helm_install/files/vault/templates/injector-role.yaml new file mode 100644 index 0000000..08c8264 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/injector-role.yaml @@ -0,0 +1,29 @@ +{{- template "vault.injectorEnabled" . -}} +{{- if .injectorEnabled -}} +{{- if and (eq (.Values.injector.leaderElector.enabled | toString) "true") (gt (.Values.injector.replicas | int) 1) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "vault.fullname" . }}-agent-injector-leader-elector-role + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +rules: + - apiGroups: [""] + resources: ["secrets", "configmaps"] + verbs: + - "create" + - "get" + - "watch" + - "list" + - "update" + - apiGroups: [""] + resources: ["pods"] + verbs: + - "get" + - "patch" + - "delete" +{{- end }} +{{- end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/vault/templates/injector-rolebinding.yaml b/ansible/roles/helm_install/files/vault/templates/injector-rolebinding.yaml new file mode 100644 index 0000000..ea0db11 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/injector-rolebinding.yaml @@ -0,0 +1,22 @@ +{{- template "vault.injectorEnabled" . -}} +{{- if .injectorEnabled -}} +{{- if and (eq (.Values.injector.leaderElector.enabled | toString) "true") (gt (.Values.injector.replicas | int) 1) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "vault.fullname" . }}-agent-injector-leader-elector-binding + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "vault.fullname" . }}-agent-injector-leader-elector-role +subjects: + - kind: ServiceAccount + name: {{ template "vault.fullname" . }}-agent-injector + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/vault/templates/injector-service.yaml b/ansible/roles/helm_install/files/vault/templates/injector-service.yaml new file mode 100644 index 0000000..5e747d6 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/injector-service.yaml @@ -0,0 +1,22 @@ +{{- template "vault.injectorEnabled" . -}} +{{- if .injectorEnabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "vault.fullname" . 
}}-agent-injector-svc + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{ template "injector.service.annotations" . }} +spec: + ports: + - name: https + port: 443 + targetPort: {{ .Values.injector.port }} + selector: + app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector + app.kubernetes.io/instance: {{ .Release.Name }} + component: webhook +{{- end }} diff --git a/ansible/roles/helm_install/files/vault/templates/injector-serviceaccount.yaml b/ansible/roles/helm_install/files/vault/templates/injector-serviceaccount.yaml new file mode 100644 index 0000000..d1919b9 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/injector-serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- template "vault.injectorEnabled" . -}} +{{- if .injectorEnabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "vault.fullname" . }}-agent-injector + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{ template "injector.serviceAccount.annotations" . }} +{{ end }} diff --git a/ansible/roles/helm_install/files/vault/templates/prometheus-prometheusrules.yaml b/ansible/roles/helm_install/files/vault/templates/prometheus-prometheusrules.yaml new file mode 100644 index 0000000..572f1a0 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/prometheus-prometheusrules.yaml @@ -0,0 +1,26 @@ +{{ if and (.Values.serverTelemetry.prometheusRules.rules) + (or (.Values.global.serverTelemetry.prometheusOperator) (.Values.serverTelemetry.prometheusRules.enabled) ) +}} +--- +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "vault.fullname" . }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- /* update the selectors docs in values.yaml whenever the defaults below change. */ -}} + {{- $selectors := .Values.serverTelemetry.prometheusRules.selectors }} + {{- if $selectors }} + {{- toYaml $selectors | nindent 4 }} + {{- else }} + release: prometheus + {{- end }} +spec: + groups: + - name: {{ include "vault.fullname" . }} + rules: + {{- toYaml .Values.serverTelemetry.prometheusRules.rules | nindent 6 }} +{{- end }} diff --git a/ansible/roles/helm_install/files/vault/templates/prometheus-servicemonitor.yaml b/ansible/roles/helm_install/files/vault/templates/prometheus-servicemonitor.yaml new file mode 100644 index 0000000..323e51f --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/prometheus-servicemonitor.yaml @@ -0,0 +1,44 @@ +{{ template "vault.mode" . }} +{{ if or (.Values.global.serverTelemetry.prometheusOperator) (.Values.serverTelemetry.serviceMonitor.enabled) }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "vault.fullname" . }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- /* update the selectors docs in values.yaml whenever the defaults below change. 
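(the fallback below assumes a Prometheus Operator installation whose resources are selected by the label release: prometheus)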
*/ -}} + {{- $selectors := .Values.serverTelemetry.serviceMonitor.selectors }} + {{- if $selectors }} + {{- toYaml $selectors | nindent 4 }} + {{- else }} + release: prometheus + {{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if eq .mode "ha" }} + vault-active: "true" + {{- else }} + vault-internal: "true" + {{- end }} + endpoints: + - port: {{ include "vault.scheme" . }} + interval: {{ .Values.serverTelemetry.serviceMonitor.interval }} + scrapeTimeout: {{ .Values.serverTelemetry.serviceMonitor.scrapeTimeout }} + scheme: {{ include "vault.scheme" . | lower }} + path: /v1/sys/metrics + params: + format: + - prometheus + tlsConfig: + insecureSkipVerify: true + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{ end }} diff --git a/ansible/roles/helm_install/files/vault/templates/server-clusterrolebinding.yaml b/ansible/roles/helm_install/files/vault/templates/server-clusterrolebinding.yaml new file mode 100644 index 0000000..8cdd611 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-clusterrolebinding.yaml @@ -0,0 +1,24 @@ +{{ template "vault.serverAuthDelegator" . }} +{{- if .serverAuthDelegator -}} +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" -}} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- end }} +kind: ClusterRoleBinding +metadata: + name: {{ template "vault.fullname" . }}-server-binding + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: {{ template "vault.serviceAccount.name" . }} + namespace: {{ .Release.Namespace }} +{{ end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/vault/templates/server-config-configmap.yaml b/ansible/roles/helm_install/files/vault/templates/server-config-configmap.yaml new file mode 100644 index 0000000..f40c696 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-config-configmap.yaml @@ -0,0 +1,40 @@ +{{ template "vault.mode" . }} +{{- if ne .mode "external" }} +{{- if .serverEnabled -}} +{{- if ne .mode "dev" -}} +{{ if or (.Values.server.standalone.config) (.Values.server.ha.config) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "vault.fullname" . }}-config + namespace: {{ .Release.Namespace }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +data: + extraconfig-from-values.hcl: |- + {{- if or (eq .mode "ha") (eq .mode "standalone") }} + {{- $type := typeOf (index .Values.server .mode).config }} + {{- if eq $type "string" }} + disable_mlock = true + {{- if eq .mode "standalone" }} + {{ tpl .Values.server.standalone.config . | nindent 4 | trim }} + {{- else if and (eq .mode "ha") (eq (.Values.server.ha.raft.enabled | toString) "false") }} + {{ tpl .Values.server.ha.config . | nindent 4 | trim }} + {{- else if and (eq .mode "ha") (eq (.Values.server.ha.raft.enabled | toString) "true") }} + {{ tpl .Values.server.ha.raft.config . 
| nindent 4 | trim }} + {{ end }} + {{- else }} + {{- if and (eq .mode "ha") (eq (.Values.server.ha.raft.enabled | toString) "true") }} +{{ merge (dict "disable_mlock" true) (index .Values.server .mode).raft.config | toPrettyJson | indent 4 }} + {{- else }} +{{ merge (dict "disable_mlock" true) (index .Values.server .mode).config | toPrettyJson | indent 4 }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/vault/templates/server-discovery-role.yaml b/ansible/roles/helm_install/files/vault/templates/server-discovery-role.yaml new file mode 100644 index 0000000..9ca23dd --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-discovery-role.yaml @@ -0,0 +1,21 @@ +{{ template "vault.mode" . }} +{{- if ne .mode "external" }} +{{- if .serverEnabled -}} +{{- if eq .mode "ha" }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: {{ .Release.Namespace }} + name: {{ template "vault.fullname" . }}-discovery-role + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list", "update", "patch"] +{{ end }} +{{ end }} +{{ end }} diff --git a/ansible/roles/helm_install/files/vault/templates/server-discovery-rolebinding.yaml b/ansible/roles/helm_install/files/vault/templates/server-discovery-rolebinding.yaml new file mode 100644 index 0000000..6e22e4c --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-discovery-rolebinding.yaml @@ -0,0 +1,29 @@ +{{ template "vault.mode" . }} +{{- if ne .mode "external" }} +{{- if .serverEnabled -}} +{{- if eq .mode "ha" }} +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" -}} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- end }} +kind: RoleBinding +metadata: + name: {{ template "vault.fullname" . }}-discovery-rolebinding + namespace: {{ .Release.Namespace }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "vault.fullname" . }}-discovery-role +subjects: +- kind: ServiceAccount + name: {{ template "vault.serviceAccount.name" . }} + namespace: {{ .Release.Namespace }} +{{ end }} +{{ end }} +{{ end }} diff --git a/ansible/roles/helm_install/files/vault/templates/server-disruptionbudget.yaml b/ansible/roles/helm_install/files/vault/templates/server-disruptionbudget.yaml new file mode 100644 index 0000000..d940fa4 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-disruptionbudget.yaml @@ -0,0 +1,26 @@ +{{ template "vault.mode" . }} +{{- if ne .mode "external" -}} +{{- if .serverEnabled -}} +{{- if and (eq .mode "ha") (eq (.Values.server.ha.disruptionBudget.enabled | toString) "true") -}} +# PodDisruptionBudget to prevent degrading the server cluster through +# voluntary cluster changes. +apiVersion: {{ ge .Capabilities.KubeVersion.Minor "21" | ternary "policy/v1" "policy/v1beta1" }} +kind: PodDisruptionBudget +metadata: + name: {{ template "vault.fullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + maxUnavailable: {{ template "vault.pdb.maxUnavailable" . }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + component: server +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/roles/helm_install/files/vault/templates/server-ha-active-service.yaml b/ansible/roles/helm_install/files/vault/templates/server-ha-active-service.yaml new file mode 100644 index 0000000..ef21237 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-ha-active-service.yaml @@ -0,0 +1,46 @@ +{{ template "vault.mode" . }} +{{- if ne .mode "external" }} +{{- template "vault.serverServiceEnabled" . -}} +{{- if .serverServiceEnabled -}} +{{- if eq .mode "ha" }} +# Service for active Vault pod +apiVersion: v1 +kind: Service +metadata: + name: {{ template "vault.fullname" . }}-active + namespace: {{ .Release.Namespace }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + vault-active: "true" + annotations: +{{ template "vault.service.annotations" .}} +spec: + {{- if .Values.server.service.type}} + type: {{ .Values.server.service.type }} + {{- end}} + {{- if .Values.server.service.clusterIP }} + clusterIP: {{ .Values.server.service.clusterIP }} + {{- end }} + {{- include "service.externalTrafficPolicy" .Values.server.service }} + publishNotReadyAddresses: {{ .Values.server.service.publishNotReadyAddresses }} + ports: + - name: {{ include "vault.scheme" . }} + port: {{ .Values.server.service.port }} + targetPort: {{ .Values.server.service.targetPort }} + {{- if and (.Values.server.service.activeNodePort) (eq (.Values.server.service.type | toString) "NodePort") }} + nodePort: {{ .Values.server.service.activeNodePort }} + {{- end }} + - name: https-internal + port: 8201 + targetPort: 8201 + selector: + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + component: server + vault-active: "true" +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/vault/templates/server-ha-standby-service.yaml b/ansible/roles/helm_install/files/vault/templates/server-ha-standby-service.yaml new file mode 100644 index 0000000..e6d66af --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-ha-standby-service.yaml @@ -0,0 +1,45 @@ +{{ template "vault.mode" . }} +{{- if ne .mode "external" }} +{{- template "vault.serverServiceEnabled" . -}} +{{- if .serverServiceEnabled -}} +{{- if eq .mode "ha" }} +# Service for standby Vault pod +apiVersion: v1 +kind: Service +metadata: + name: {{ template "vault.fullname" . }}-standby + namespace: {{ .Release.Namespace }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + annotations: +{{ template "vault.service.annotations" .}} +spec: + {{- if .Values.server.service.type}} + type: {{ .Values.server.service.type }} + {{- end}} + {{- if .Values.server.service.clusterIP }} + clusterIP: {{ .Values.server.service.clusterIP }} + {{- end }} + {{- include "service.externalTrafficPolicy" .Values.server.service }} + publishNotReadyAddresses: {{ .Values.server.service.publishNotReadyAddresses }} + ports: + - name: {{ include "vault.scheme" . }} + port: {{ .Values.server.service.port }} + targetPort: {{ .Values.server.service.targetPort }} + {{- if and (.Values.server.service.standbyNodePort) (eq (.Values.server.service.type | toString) "NodePort") }} + nodePort: {{ .Values.server.service.standbyNodePort }} + {{- end }} + - name: https-internal + port: 8201 + targetPort: 8201 + selector: + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + component: server + vault-active: "false" +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/vault/templates/server-headless-service.yaml b/ansible/roles/helm_install/files/vault/templates/server-headless-service.yaml new file mode 100644 index 0000000..b03f491 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-headless-service.yaml @@ -0,0 +1,34 @@ +{{ template "vault.mode" . }} +{{- if ne .mode "external" }} +{{- template "vault.serverServiceEnabled" . -}} +{{- if .serverServiceEnabled -}} +# Service for Vault cluster +apiVersion: v1 +kind: Service +metadata: + name: {{ template "vault.fullname" . }}-internal + namespace: {{ .Release.Namespace }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + vault-internal: "true" + annotations: +{{ template "vault.service.annotations" .}} +spec: + clusterIP: None + publishNotReadyAddresses: {{ .Values.server.service.publishNotReadyAddresses }} + ports: + - name: "{{ include "vault.scheme" . }}" + port: {{ .Values.server.service.port }} + targetPort: {{ .Values.server.service.targetPort }} + - name: https-internal + port: 8201 + targetPort: 8201 + selector: + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + component: server +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/vault/templates/server-ingress.yaml b/ansible/roles/helm_install/files/vault/templates/server-ingress.yaml new file mode 100644 index 0000000..c81e5f5 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-ingress.yaml @@ -0,0 +1,77 @@ +{{- if not .Values.global.openshift }} +{{ template "vault.mode" . }} +{{- if ne .mode "external" }} +{{- if .Values.server.ingress.enabled -}} +{{- $extraPaths := .Values.server.ingress.extraPaths -}} +{{- $serviceName := include "vault.fullname" . -}} +{{- template "vault.serverServiceEnabled" . 
-}} +{{- if .serverServiceEnabled -}} +{{- if and (eq .mode "ha" ) (eq (.Values.server.ingress.activeService | toString) "true") }} +{{- $serviceName = printf "%s-%s" $serviceName "active" -}} +{{- end }} +{{- $servicePort := .Values.server.service.port -}} +{{- $pathType := .Values.server.ingress.pathType -}} +{{- $kubeVersion := .Capabilities.KubeVersion.Version }} +{{ if semverCompare ">= 1.19.0-0" $kubeVersion }} +apiVersion: networking.k8s.io/v1 +{{ else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +apiVersion: networking.k8s.io/v1beta1 +{{ else }} +apiVersion: extensions/v1beta1 +{{ end }} +kind: Ingress +metadata: + name: {{ template "vault.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- with .Values.server.ingress.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- template "vault.ingress.annotations" . }} +spec: +{{- if .Values.server.ingress.tls }} + tls: + {{- range .Values.server.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} +{{- if .Values.server.ingress.ingressClassName }} + ingressClassName: {{ .Values.server.ingress.ingressClassName }} +{{- end }} + rules: + {{- range .Values.server.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + {{- range (.paths | default (list "/")) }} + - path: {{ . }} + {{ if semverCompare ">= 1.19.0-0" $kubeVersion }} + pathType: {{ $pathType }} + {{ end }} + backend: + {{ if semverCompare ">= 1.19.0-0" $kubeVersion }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{ else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{ end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/ansible/roles/helm_install/files/vault/templates/server-network-policy.yaml b/ansible/roles/helm_install/files/vault/templates/server-network-policy.yaml new file mode 100644 index 0000000..5f4c21a --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-network-policy.yaml @@ -0,0 +1,26 @@ +{{- if eq (.Values.server.networkPolicy.enabled | toString) "true" }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "vault.fullname" . }} + labels: + app.kubernetes.io/name: {{ template "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + podSelector: + matchLabels: + app.kubernetes.io/name: {{ template "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + ingress: + - from: + - namespaceSelector: {} + ports: + - port: 8200 + protocol: TCP + - port: 8201 + protocol: TCP + {{- if .Values.server.networkPolicy.egress }} + egress: + {{- toYaml .Values.server.networkPolicy.egress | nindent 4 }} + {{ end }} +{{ end }} diff --git a/ansible/roles/helm_install/files/vault/templates/server-psp-role.yaml b/ansible/roles/helm_install/files/vault/templates/server-psp-role.yaml new file mode 100644 index 0000000..b8eb897 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-psp-role.yaml @@ -0,0 +1,20 @@ +{{ template "vault.mode" . 
}} +{{- if .serverEnabled -}} +{{- if and (ne .mode "") (eq (.Values.global.psp.enable | toString) "true") }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "vault.fullname" . }}-psp + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "vault.fullname" . }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/vault/templates/server-psp-rolebinding.yaml b/ansible/roles/helm_install/files/vault/templates/server-psp-rolebinding.yaml new file mode 100644 index 0000000..fded9fb --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-psp-rolebinding.yaml @@ -0,0 +1,21 @@ +{{ template "vault.mode" . }} +{{- if .serverEnabled -}} +{{- if and (ne .mode "") (eq (.Values.global.psp.enable | toString) "true") }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "vault.fullname" . }}-psp + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +roleRef: + kind: Role + name: {{ template "vault.fullname" . }}-psp + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ template "vault.fullname" . }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/vault/templates/server-psp.yaml b/ansible/roles/helm_install/files/vault/templates/server-psp.yaml new file mode 100644 index 0000000..d210af3 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-psp.yaml @@ -0,0 +1,49 @@ +{{ template "vault.mode" . }} +{{- if .serverEnabled -}} +{{- if and (ne .mode "") (eq (.Values.global.psp.enable | toString) "true") }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "vault.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- template "vault.psp.annotations" . }} +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI + {{- if eq (.Values.server.dataStorage.enabled | toString) "true" }} + - persistentVolumeClaim + {{- end }} + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: MustRunAsNonRoot + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: MustRunAs + ranges: + # Forbid adding the root group. 
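+      # (any non-root GID from 1 to 65535 may be assigned)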
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/vault/templates/server-route.yaml b/ansible/roles/helm_install/files/vault/templates/server-route.yaml new file mode 100644 index 0000000..e122d93 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-route.yaml @@ -0,0 +1,34 @@ +{{- if .Values.global.openshift }} +{{- if ne .mode "external" }} +{{- if .Values.server.route.enabled -}} +{{- $serviceName := include "vault.fullname" . -}} +{{- if and (eq .mode "ha" ) (eq (.Values.server.route.activeService | toString) "true") }} +{{- $serviceName = printf "%s-%s" $serviceName "active" -}} +{{- end }} +kind: Route +apiVersion: route.openshift.io/v1 +metadata: + name: {{ template "vault.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- with .Values.server.route.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- template "vault.route.annotations" . }} +spec: + host: {{ .Values.server.route.host }} + to: + kind: Service + name: {{ $serviceName }} + weight: 100 + port: + targetPort: 8200 + tls: + {{- toYaml .Values.server.route.tls | nindent 4 }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/vault/templates/server-service.yaml b/ansible/roles/helm_install/files/vault/templates/server-service.yaml new file mode 100644 index 0000000..3a9b0e7 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-service.yaml @@ -0,0 +1,44 @@ +{{ template "vault.mode" . }} +{{- if ne .mode "external" }} +{{- template "vault.serverServiceEnabled" . -}} +{{- if .serverServiceEnabled -}} +# Service for Vault cluster +apiVersion: v1 +kind: Service +metadata: + name: {{ template "vault.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + annotations: +{{ template "vault.service.annotations" .}} +spec: + {{- if .Values.server.service.type}} + type: {{ .Values.server.service.type }} + {{- end}} + {{- if .Values.server.service.clusterIP }} + clusterIP: {{ .Values.server.service.clusterIP }} + {{- end }} + {{- include "service.externalTrafficPolicy" .Values.server.service }} + # We want the servers to become available even if they're not ready + # since this DNS is also used for join operations. + publishNotReadyAddresses: {{ .Values.server.service.publishNotReadyAddresses }} + ports: + - name: {{ include "vault.scheme" . }} + port: {{ .Values.server.service.port }} + targetPort: {{ .Values.server.service.targetPort }} + {{- if and (.Values.server.service.nodePort) (eq (.Values.server.service.type | toString) "NodePort") }} + nodePort: {{ .Values.server.service.nodePort }} + {{- end }} + - name: https-internal + port: 8201 + targetPort: 8201 + selector: + app.kubernetes.io/name: {{ include "vault.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + component: server +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/vault/templates/server-serviceaccount.yaml b/ansible/roles/helm_install/files/vault/templates/server-serviceaccount.yaml new file mode 100644 index 0000000..c0d32d1 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{ template "vault.serverServiceAccountEnabled" . }} +{{- if .serverServiceAccountEnabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "vault.serviceAccount.name" . }} + namespace: {{ .Release.Namespace }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{ template "vault.serviceAccount.annotations" . }} +{{ end }} diff --git a/ansible/roles/helm_install/files/vault/templates/server-statefulset.yaml b/ansible/roles/helm_install/files/vault/templates/server-statefulset.yaml new file mode 100644 index 0000000..fb3cbfa --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/server-statefulset.yaml @@ -0,0 +1,210 @@ +{{ template "vault.mode" . }} +{{- if ne .mode "external" }} +{{- if ne .mode "" }} +{{- if .serverEnabled -}} +# StatefulSet to run the actual vault server cluster. +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "vault.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- template "vault.statefulSet.annotations" . }} +spec: + serviceName: {{ template "vault.fullname" . }}-internal + podManagementPolicy: Parallel + replicas: {{ template "vault.replicas" . }} + updateStrategy: + type: {{ .Values.server.updateStrategyType }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + component: server + template: + metadata: + labels: + helm.sh/chart: {{ template "vault.chart" . }} + app.kubernetes.io/name: {{ template "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + component: server + {{- if .Values.server.extraLabels -}} + {{- toYaml .Values.server.extraLabels | nindent 8 -}} + {{- end -}} + {{ template "vault.annotations" . }} + spec: + {{ template "vault.affinity" . }} + {{ template "vault.topologySpreadConstraints" . }} + {{ template "vault.tolerations" . }} + {{ template "vault.nodeselector" . }} + {{- if .Values.server.priorityClassName }} + priorityClassName: {{ .Values.server.priorityClassName }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + serviceAccountName: {{ template "vault.serviceAccount.name" . }} + {{ if .Values.server.shareProcessNamespace }} + shareProcessNamespace: true + {{ end }} + {{- template "server.statefulSet.securityContext.pod" . }} + {{- if not .Values.global.openshift }} + hostNetwork: {{ .Values.server.hostNetwork }} + {{- end }} + + volumes: + {{ template "vault.volumes" . }} + - name: home + emptyDir: {} + {{- if .Values.server.extraInitContainers }} + initContainers: + {{ toYaml .Values.server.extraInitContainers | nindent 8}} + {{- end }} + containers: + - name: vault + {{ template "vault.resources" . 
}} + image: {{ .Values.server.image.repository }}:{{ .Values.server.image.tag | default "latest" }} + imagePullPolicy: {{ .Values.server.image.pullPolicy }} + command: + - "/bin/sh" + - "-ec" + args: {{ template "vault.args" . }} + {{- template "server.statefulSet.securityContext.container" . }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: VAULT_K8S_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: VAULT_K8S_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: VAULT_ADDR + value: "{{ include "vault.scheme" . }}://127.0.0.1:8200" + - name: VAULT_API_ADDR + {{- if .Values.server.ha.apiAddr }} + value: {{ .Values.server.ha.apiAddr }} + {{- else }} + value: "{{ include "vault.scheme" . }}://$(POD_IP):8200" + {{- end }} + - name: SKIP_CHOWN + value: "true" + - name: SKIP_SETCAP + value: "true" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: VAULT_CLUSTER_ADDR + {{- if .Values.server.ha.clusterAddr }} + value: {{ .Values.server.ha.clusterAddr }} + {{- else }} + value: "https://$(HOSTNAME).{{ template "vault.fullname" . }}-internal:8201" + {{- end }} + {{- if and (eq (.Values.server.ha.raft.enabled | toString) "true") (eq (.Values.server.ha.raft.setNodeId | toString) "true") }} + - name: VAULT_RAFT_NODE_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- end }} + - name: HOME + value: "/home/vault" + {{- if .Values.server.logLevel }} + - name: VAULT_LOG_LEVEL + value: "{{ .Values.server.logLevel }}" + {{- end }} + {{- if .Values.server.logFormat }} + - name: VAULT_LOG_FORMAT + value: "{{ .Values.server.logFormat }}" + {{- end }} + {{- if (and .Values.server.enterpriseLicense.secretName .Values.server.enterpriseLicense.secretKey) }} + - name: VAULT_LICENSE_PATH + value: /vault/license/{{ .Values.server.enterpriseLicense.secretKey }} + {{- end }} + {{ template "vault.envs" . }} + {{- include "vault.extraEnvironmentVars" .Values.server | nindent 12 }} + {{- include "vault.extraSecretEnvironmentVars" .Values.server | nindent 12 }} + volumeMounts: + {{ template "vault.mounts" . }} + - name: home + mountPath: /home/vault + ports: + - containerPort: 8200 + name: {{ include "vault.scheme" . }} + - containerPort: 8201 + name: https-internal + - containerPort: 8202 + name: {{ include "vault.scheme" . }}-rep + {{- if .Values.server.readinessProbe.enabled }} + readinessProbe: + {{- if .Values.server.readinessProbe.path }} + httpGet: + path: {{ .Values.server.readinessProbe.path | quote }} + port: 8200 + scheme: {{ include "vault.scheme" . | upper }} + {{- else }} + # Check status; unsealed vault servers return 0 + # The exit code reflects the seal status: + # 0 - unsealed + # 1 - error + # 2 - sealed + exec: + command: ["/bin/sh", "-ec", "vault status -tls-skip-verify"] + {{- end }} + failureThreshold: {{ .Values.server.readinessProbe.failureThreshold }} + initialDelaySeconds: {{ .Values.server.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.server.readinessProbe.periodSeconds }} + successThreshold: {{ .Values.server.readinessProbe.successThreshold }} + timeoutSeconds: {{ .Values.server.readinessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.server.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.server.livenessProbe.path | quote }} + port: 8200 + scheme: {{ include "vault.scheme" . 
| upper }} + failureThreshold: {{ .Values.server.livenessProbe.failureThreshold }} + initialDelaySeconds: {{ .Values.server.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.server.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.server.livenessProbe.successThreshold }} + timeoutSeconds: {{ .Values.server.livenessProbe.timeoutSeconds }} + {{- end }} + lifecycle: + # Vault container doesn't receive SIGTERM from Kubernetes + # and after the grace period ends, Kube sends SIGKILL. This + # causes issues with graceful shutdowns such as deregistering itself + # from Consul (zombie services). + preStop: + exec: + command: [ + "/bin/sh", "-c", + # Adding a sleep here to give the pod eviction a + # chance to propagate, so requests will not be made + # to this pod while it's terminating + "sleep {{ .Values.server.preStopSleepSeconds }} && kill -SIGTERM $(pidof vault)", + ] + {{- if .Values.server.postStart }} + postStart: + exec: + command: + {{- range (.Values.server.postStart) }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- if .Values.server.extraContainers }} + {{ toYaml .Values.server.extraContainers | nindent 8}} + {{- end }} + {{- include "imagePullSecrets" . | nindent 6 }} + {{ template "vault.volumeclaims" . }} +{{ end }} +{{ end }} +{{ end }} diff --git a/ansible/roles/helm_install/files/vault/templates/tests/server-test.yaml b/ansible/roles/helm_install/files/vault/templates/tests/server-test.yaml new file mode 100644 index 0000000..56dbee7 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/tests/server-test.yaml @@ -0,0 +1,51 @@ +{{ template "vault.mode" . }} +{{- if ne .mode "external" }} +{{- if .serverEnabled -}} +apiVersion: v1 +kind: Pod +metadata: + name: "{{ .Release.Name }}-server-test" + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": test +spec: + {{- include "imagePullSecrets" . | nindent 2 }} + containers: + - name: {{ .Release.Name }}-server-test + image: {{ .Values.server.image.repository }}:{{ .Values.server.image.tag | default "latest" }} + imagePullPolicy: {{ .Values.server.image.pullPolicy }} + env: + - name: VAULT_ADDR + value: {{ include "vault.scheme" . }}://{{ template "vault.fullname" . }}.{{ .Release.Namespace }}.svc:{{ .Values.server.service.port }} + {{- include "vault.extraEnvironmentVars" .Values.server | nindent 8 }} + command: + - /bin/sh + - -c + - | + echo "Checking for sealed info in 'vault status' output" + ATTEMPTS=10 + n=0 + until [ "$n" -ge $ATTEMPTS ] + do + echo "Attempt" $n... + vault status -format yaml | grep -E '^sealed: (true|false)' && break + n=$((n+1)) + sleep 5 + done + if [ $n -ge $ATTEMPTS ]; then + echo "timed out looking for sealed info in 'vault status' output" + exit 1 + fi + + exit 0 + volumeMounts: + {{- if .Values.server.volumeMounts }} + {{- toYaml .Values.server.volumeMounts | nindent 8}} + {{- end }} + volumes: + {{- if .Values.server.volumes }} + {{- toYaml .Values.server.volumes | nindent 4}} + {{- end }} + restartPolicy: Never +{{- end }} +{{- end }} diff --git a/ansible/roles/helm_install/files/vault/templates/ui-service.yaml b/ansible/roles/helm_install/files/vault/templates/ui-service.yaml new file mode 100644 index 0000000..d45afdd --- /dev/null +++ b/ansible/roles/helm_install/files/vault/templates/ui-service.yaml @@ -0,0 +1,37 @@ +{{ template "vault.mode" . }} +{{- if ne .mode "external" }} +{{- template "vault.uiEnabled" . -}} +{{- if .uiEnabled -}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ template "vault.fullname" . 
}}-ui + namespace: {{ .Release.Namespace }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }}-ui + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- template "vault.ui.annotations" . }} +spec: + selector: + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + component: server + {{- if and (.Values.ui.activeVaultPodOnly) (eq .mode "ha") }} + vault-active: "true" + {{- end }} + publishNotReadyAddresses: {{ .Values.ui.publishNotReadyAddresses }} + ports: + - name: {{ include "vault.scheme" . }} + port: {{ .Values.ui.externalPort }} + targetPort: {{ .Values.ui.targetPort }} + {{- if .Values.ui.serviceNodePort }} + nodePort: {{ .Values.ui.serviceNodePort }} + {{- end }} + type: {{ .Values.ui.serviceType }} + {{- include "service.externalTrafficPolicy" .Values.ui }} + {{- include "service.loadBalancer" .Values.ui }} +{{- end -}} +{{- end }} diff --git a/ansible/roles/helm_install/files/vault/test b/ansible/roles/helm_install/files/vault/test new file mode 100644 index 0000000..bcd48de --- /dev/null +++ b/ansible/roles/helm_install/files/vault/test @@ -0,0 +1,600 @@ +--- +# Source: vault/templates/injector-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-vault-agent-injector + namespace: dsk-middle + labels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm +--- +# Source: vault/templates/server-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-vault + namespace: dsk-middle + labels: + helm.sh/chart: vault-0.22.1 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm +--- +# Source: vault/templates/server-config-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: release-name-vault-config + namespace: dsk-middle + labels: + helm.sh/chart: vault-0.22.1 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm +data: + extraconfig-from-values.hcl: |- + disable_mlock = true + ui = true + + listener "tcp" { + tls_disable = 1 + address = "[::]:8200" + cluster_address = "[::]:8201" + } + storage "file" { + path = "/vault/data" + } + + seal "awskms" { + region = "ap-northeast-2" + access_key = "AKIAXMVVF3TA3NTIIHN6" + secret_key = "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" + kms_key_id = "c5b3ae3a-e976-4773-abcb-18d68c26a67d" + } +--- +# Source: vault/templates/injector-clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-name-vault-agent-injector-clusterrole + labels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations"] + verbs: + - "get" + - "list" + - "watch" + - "patch" +--- +# Source: vault/templates/injector-clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: release-name-vault-agent-injector-binding + labels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-vault-agent-injector-clusterrole +subjects: 
+- kind: ServiceAccount + name: release-name-vault-agent-injector + namespace: dsk-middle +--- +# Source: vault/templates/server-clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: release-name-vault-server-binding + labels: + helm.sh/chart: vault-0.22.1 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: release-name-vault + namespace: dsk-middle +--- +# Source: vault/templates/injector-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: release-name-vault-agent-injector-svc + namespace: dsk-middle + labels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + +spec: + ports: + - name: https + port: 443 + targetPort: 8080 + selector: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: release-name + component: webhook +--- +# Source: vault/templates/server-headless-service.yaml +# Service for Vault cluster +apiVersion: v1 +kind: Service +metadata: + name: release-name-vault-internal + namespace: dsk-middle + labels: + helm.sh/chart: vault-0.22.1 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + vault-internal: "true" + annotations: + +spec: + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: "http" + port: 8200 + targetPort: 8200 + - name: https-internal + port: 8201 + targetPort: 8201 + selector: + app.kubernetes.io/name: vault + app.kubernetes.io/instance: release-name + component: server +--- +# Source: vault/templates/server-service.yaml +# Service for Vault cluster +apiVersion: v1 +kind: Service +metadata: + name: release-name-vault + namespace: dsk-middle + labels: + helm.sh/chart: vault-0.22.1 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + annotations: + +spec: + # We want the servers to become available even if they're not ready + # since this DNS is also used for join operations. 
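+  # Note (grounded in the readiness probe below): a sealed Vault pod fails its
+  # "vault status" readiness check, so without publishNotReadyAddresses its
+  # address would be withheld from this Service's DNS and joining peers could
+  # never reach it before the first unseal.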
+ publishNotReadyAddresses: true + ports: + - name: http + port: 8200 + targetPort: 8200 + - name: https-internal + port: 8201 + targetPort: 8201 + selector: + app.kubernetes.io/name: vault + app.kubernetes.io/instance: release-name + component: server +--- +# Source: vault/templates/ui-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: release-name-vault-ui + namespace: dsk-middle + labels: + helm.sh/chart: vault-0.22.1 + app.kubernetes.io/name: vault-ui + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm +spec: + selector: + app.kubernetes.io/name: vault + app.kubernetes.io/instance: release-name + component: server + publishNotReadyAddresses: true + ports: + - name: http + port: 8200 + targetPort: 8200 + nodePort: 32702 + type: NodePort + externalTrafficPolicy: Cluster +--- +# Source: vault/templates/injector-deployment.yaml +# Deployment for the injector +apiVersion: apps/v1 +kind: Deployment +metadata: + name: release-name-vault-agent-injector + namespace: dsk-middle + labels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm + component: webhook +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: release-name + component: webhook + + template: + metadata: + labels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: release-name + component: webhook + spec: + + affinity: + + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-kafka + + + + tolerations: + - key: dev/data-kafka + operator: Exists + + serviceAccountName: "release-name-vault-agent-injector" + + securityContext: + runAsNonRoot: true + runAsGroup: 1000 + runAsUser: 100 + fsGroup: 1000 + hostNetwork: false + containers: + - name: sidecar-injector + + image: "hashicorp/vault-k8s:1.0.1" + imagePullPolicy: "IfNotPresent" + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + env: + - name: AGENT_INJECT_LISTEN + value: :8080 + - name: AGENT_INJECT_LOG_LEVEL + value: info + - name: AGENT_INJECT_VAULT_ADDR + value: http://release-name-vault.dsk-middle.svc:8200 + - name: AGENT_INJECT_VAULT_AUTH_PATH + value: auth/kubernetes + - name: AGENT_INJECT_VAULT_IMAGE + value: "hashicorp/vault:1.12.0" + - name: AGENT_INJECT_TLS_AUTO + value: release-name-vault-agent-injector-cfg + - name: AGENT_INJECT_TLS_AUTO_HOSTS + value: release-name-vault-agent-injector-svc,release-name-vault-agent-injector-svc.dsk-middle,release-name-vault-agent-injector-svc.dsk-middle.svc + - name: AGENT_INJECT_LOG_FORMAT + value: standard + - name: AGENT_INJECT_REVOKE_ON_SHUTDOWN + value: "false" + - name: AGENT_INJECT_CPU_REQUEST + value: "250m" + - name: AGENT_INJECT_CPU_LIMIT + value: "500m" + - name: AGENT_INJECT_MEM_REQUEST + value: "64Mi" + - name: AGENT_INJECT_MEM_LIMIT + value: "128Mi" + - name: AGENT_INJECT_DEFAULT_TEMPLATE + value: "map" + - name: AGENT_INJECT_TEMPLATE_CONFIG_EXIT_ON_RETRY_FAILURE + value: "true" + + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + args: + - agent-inject + - 2>&1 + livenessProbe: + httpGet: + path: /health/ready + port: 8080 + scheme: HTTPS + failureThreshold: 2 + initialDelaySeconds: 5 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /health/ready + port: 8080 + scheme: HTTPS + failureThreshold: 2 
+ initialDelaySeconds: 5 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 5 +--- +# Source: vault/templates/server-statefulset.yaml +# StatefulSet to run the actual vault server cluster. +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: release-name-vault + namespace: dsk-middle + labels: + app.kubernetes.io/name: vault + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm +spec: + serviceName: release-name-vault-internal + podManagementPolicy: Parallel + replicas: 1 + updateStrategy: + type: OnDelete + selector: + matchLabels: + app.kubernetes.io/name: vault + app.kubernetes.io/instance: release-name + component: server + template: + metadata: + labels: + helm.sh/chart: vault-0.22.1 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: release-name + component: server + spec: + + affinity: + + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: datasaker/group + operator: In + values: + - data-kafka + + + + tolerations: + - key: dev/data-kafka + operator: Exists + + terminationGracePeriodSeconds: 10 + serviceAccountName: release-name-vault + + securityContext: + runAsNonRoot: true + runAsGroup: 1000 + runAsUser: 100 + fsGroup: 1000 + hostNetwork: false + + volumes: + + - name: config + configMap: + name: release-name-vault-config + + - name: home + emptyDir: {} + containers: + - name: vault + + image: hashicorp/vault:1.12.0 + imagePullPolicy: IfNotPresent + command: + - "/bin/sh" + - "-ec" + args: + - | + cp /vault/config/extraconfig-from-values.hcl /tmp/storageconfig.hcl; + [ -n "${HOST_IP}" ] && sed -Ei "s|HOST_IP|${HOST_IP?}|g" /tmp/storageconfig.hcl; + [ -n "${POD_IP}" ] && sed -Ei "s|POD_IP|${POD_IP?}|g" /tmp/storageconfig.hcl; + [ -n "${HOSTNAME}" ] && sed -Ei "s|HOSTNAME|${HOSTNAME?}|g" /tmp/storageconfig.hcl; + [ -n "${API_ADDR}" ] && sed -Ei "s|API_ADDR|${API_ADDR?}|g" /tmp/storageconfig.hcl; + [ -n "${TRANSIT_ADDR}" ] && sed -Ei "s|TRANSIT_ADDR|${TRANSIT_ADDR?}|g" /tmp/storageconfig.hcl; + [ -n "${RAFT_ADDR}" ] && sed -Ei "s|RAFT_ADDR|${RAFT_ADDR?}|g" /tmp/storageconfig.hcl; + /usr/local/bin/docker-entrypoint.sh vault server -config=/tmp/storageconfig.hcl + + securityContext: + allowPrivilegeEscalation: false + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: VAULT_K8S_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: VAULT_K8S_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: VAULT_ADDR + value: "http://127.0.0.1:8200" + - name: VAULT_API_ADDR + value: "http://$(POD_IP):8200" + - name: SKIP_CHOWN + value: "true" + - name: SKIP_SETCAP + value: "true" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: VAULT_CLUSTER_ADDR + value: "https://$(HOSTNAME).release-name-vault-internal:8201" + - name: HOME + value: "/home/vault" + + + + volumeMounts: + + + + - name: data + mountPath: /vault/data + + + + - name: config + mountPath: /vault/config + + - name: home + mountPath: /home/vault + ports: + - containerPort: 8200 + name: http + - containerPort: 8201 + name: https-internal + - containerPort: 8202 + name: http-rep + readinessProbe: + # Check status; unsealed vault servers return 0 + # The exit code reflects the seal status: + # 0 - unsealed + # 1 - error + # 2 - sealed + exec: + command: ["/bin/sh", "-ec", "vault status -tls-skip-verify"] + failureThreshold: 2 + initialDelaySeconds: 5 + 
periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 3 + lifecycle: + # Vault container doesn't receive SIGTERM from Kubernetes + # and after the grace period ends, Kube sends SIGKILL. This + # causes issues with graceful shutdowns such as deregistering itself + # from Consul (zombie services). + preStop: + exec: + command: [ + "/bin/sh", "-c", + # Adding a sleep here to give the pod eviction a + # chance to propagate, so requests will not be made + # to this pod while it's terminating + "sleep 5 && kill -SIGTERM $(pidof vault)", + ] + + + volumeClaimTemplates: + - metadata: + name: data + + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: nfs-provisioner-dev +--- +# Source: vault/templates/injector-mutating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: release-name-vault-agent-injector-cfg + labels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: release-name + app.kubernetes.io/managed-by: Helm +webhooks: + - name: vault.hashicorp.com + failurePolicy: Ignore + matchPolicy: Exact + sideEffects: None + timeoutSeconds: 30 + admissionReviewVersions: ["v1", "v1beta1"] + clientConfig: + service: + name: release-name-vault-agent-injector-svc + namespace: dsk-middle + path: "/mutate" + caBundle: "" + rules: + - operations: ["CREATE", "UPDATE"] + apiGroups: [""] + apiVersions: ["v1"] + resources: ["pods"] + objectSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: NotIn + values: + - vault-agent-injector +--- +# Source: vault/templates/tests/server-test.yaml +apiVersion: v1 +kind: Pod +metadata: + name: "release-name-server-test" + namespace: dsk-middle + annotations: + "helm.sh/hook": test +spec: + + containers: + - name: release-name-server-test + image: hashicorp/vault:1.12.0 + imagePullPolicy: IfNotPresent + env: + - name: VAULT_ADDR + value: http://release-name-vault.dsk-middle.svc:8200 + + command: + - /bin/sh + - -c + - | + echo "Checking for sealed info in 'vault status' output" + ATTEMPTS=10 + n=0 + until [ "$n" -ge $ATTEMPTS ] + do + echo "Attempt" $n... 
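+            # vault status exits 0 when unsealed, 2 when sealed, and 1 on error;
+            # the grep succeeds whenever a "sealed:" line is printed, so the loop
+            # ends in both healthy cases and only times out on repeated errors.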
+ vault status -format yaml | grep -E '^sealed: (true|false)' && break + n=$((n+1)) + sleep 5 + done + if [ $n -ge $ATTEMPTS ]; then + echo "timed out looking for sealed info in 'vault status' output" + exit 1 + fi + + exit 0 + volumeMounts: + volumes: + restartPolicy: Never diff --git a/ansible/roles/helm_install/files/vault/tls/ca-cert.pem b/ansible/roles/helm_install/files/vault/tls/ca-cert.pem new file mode 100644 index 0000000..bac7c9f --- /dev/null +++ b/ansible/roles/helm_install/files/vault/tls/ca-cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFlTCCA32gAwIBAgIUcJvZ6e+t9LnoPjpzENgZQVW8e04wDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCS1IxDTALBgNVBAgMBEFTSUExDjAMBgNVBAcMBVNFT1VM +MQ0wCwYDVQQKDARFWEVNMQ4wDAYDVQQLDAVDTE9VRDENMAsGA1UEAwwERVhFTTAe +Fw0yMjEyMDYwNzA3MThaFw0zMjEyMDMwNzA3MThaMFoxCzAJBgNVBAYTAktSMQ0w +CwYDVQQIDARBU0lBMQ4wDAYDVQQHDAVTRU9VTDENMAsGA1UECgwERVhFTTEOMAwG +A1UECwwFQ0xPVUQxDTALBgNVBAMMBEVYRU0wggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDbpnrLEJhtRspuAS0zPSdldRciPlC7pVBpiMZY5kDsCrW9D1qR +7e0Dy+J9XrJlKvByrDRxvHsdEU3LTlmuss5Pg8XQTmXm/mdznaAiQuzjmFzRXIs5 +KXA0oP8LsT6wgEZSMOE4psKvVBOZRzlGSV325ucsPNa16KOX8a9skZYl6GM1COn3 +dm8jNyKboRhHcs1opl0CsZ8Wh8sljJQ1M/Tm9QxGYlCVtWEarA9p4Qv2r+Nz53Da +1AicUM9PFGRmcsH8uNsQF6SB1GHgbshViV9A/gVJtWoHvjRfcVo/B4Q/wmsYfLal +yyWJxZatUJFg5z8/YRrLmRPBNeMmOl2c197J3vBNKzbwFp5AesyNfDlP51aIdg7C +TUn89Pts9afT237/lVxnxsCZJfB0QeV+EPlS8/lJdGW85EhZkhm27UjUkHwXnTcs +pagiJ07Jk3q4ulL8yT+MK1dVxpgTD3Po99LU/E+VZLtzPUD+F9+H4MQcZRDBuJrv +0PpRPgwHkCUTytnHBAZj1ZVZEjgKzW9HbVXvi+DkLJsuOSWy39STCaAkMXQA2rV5 +Z/bkDKlz5JQoKRZsHFAdzJQ/+29GONbTRIUlKXMSglG4i1sHRLVRyr2Ep9II/dQX +wClXulBDfOIKWP5PD0Pqju47aR/9+SttU1TPBqT+BGP4HVst6ofiIIfnmQIDAQAB +o1MwUTAdBgNVHQ4EFgQUxDyce/PQj55GdKLbhRO2/LPksxowHwYDVR0jBBgwFoAU +xDyce/PQj55GdKLbhRO2/LPksxowDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0B +AQsFAAOCAgEA174OK1t8d31bO7iVYoAYmg19GJNtd3MPHfDclose7zt8wtbasVrx +SYcBcMjSYCHZoGCV1gxI2SoNN4I8Q2JohZbr9pu18SdPgh911PRPTBxRPeXGtYL5 +/R0Xn83J3B+VsR1/fqmKpnWouA5ReRSjQ5cBpMS+amVdnxesNnMMVafdJ//Qn+MU +wt8oNI+opU/GPobpTfI4PWWpUANa8QUk85CrYNkfIG3anKRovEM0HQ03iVvwG7Bc +vAS/O18CrEFcSJMjsBT/yWX44k+I7ZJCSSAMnFTDEzTmgR3oV306Ycrb2IPvAXxl +VEyRYBLdkhSi3aWZqXHwMhUcNV6ozosF4qHfq1AOj8DTI+r9057bX9+JOwXyYLT7 +tfnz+uF7VNCQH8PZ3PRgXQ2CKbISdm0z0y0rnVoDRv80UwJdZiuKfsW069PE5bX7 +Y1r+MoQH3mup9Kotj1VWhjHN+czG7OBxhb0gga55FdyuV/gHYJfWc8UemmHed1MK +0TvUR8+yDuYrgjaNCBK7zdXX0plwaDjaCi3FZIytXZXr2dl6eza9NTf5tzCase9G +m8asGTo29ye1lt+VIx50mN5M/DYc4OOpgm+kzaiRquIV81+Qa1HCBbOHlmGheSyr +lbRwwnil2jLj3Qh81E6Z9BUEHmvFsr0s/Z2Ge3ffWsknsUcTpt49Z48= +-----END CERTIFICATE----- diff --git a/ansible/roles/helm_install/files/vault/tls/ca-cert.srl b/ansible/roles/helm_install/files/vault/tls/ca-cert.srl new file mode 100644 index 0000000..b49f304 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/tls/ca-cert.srl @@ -0,0 +1 @@ +09BA177CB280640376E27730B98C9945A5BBBA05 diff --git a/ansible/roles/helm_install/files/vault/tls/ca-key.pem b/ansible/roles/helm_install/files/vault/tls/ca-key.pem new file mode 100644 index 0000000..9e9ead6 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/tls/ca-key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDbpnrLEJhtRspu +AS0zPSdldRciPlC7pVBpiMZY5kDsCrW9D1qR7e0Dy+J9XrJlKvByrDRxvHsdEU3L +Tlmuss5Pg8XQTmXm/mdznaAiQuzjmFzRXIs5KXA0oP8LsT6wgEZSMOE4psKvVBOZ +RzlGSV325ucsPNa16KOX8a9skZYl6GM1COn3dm8jNyKboRhHcs1opl0CsZ8Wh8sl +jJQ1M/Tm9QxGYlCVtWEarA9p4Qv2r+Nz53Da1AicUM9PFGRmcsH8uNsQF6SB1GHg +bshViV9A/gVJtWoHvjRfcVo/B4Q/wmsYfLalyyWJxZatUJFg5z8/YRrLmRPBNeMm 
+Ol2c197J3vBNKzbwFp5AesyNfDlP51aIdg7CTUn89Pts9afT237/lVxnxsCZJfB0 +QeV+EPlS8/lJdGW85EhZkhm27UjUkHwXnTcspagiJ07Jk3q4ulL8yT+MK1dVxpgT +D3Po99LU/E+VZLtzPUD+F9+H4MQcZRDBuJrv0PpRPgwHkCUTytnHBAZj1ZVZEjgK +zW9HbVXvi+DkLJsuOSWy39STCaAkMXQA2rV5Z/bkDKlz5JQoKRZsHFAdzJQ/+29G +ONbTRIUlKXMSglG4i1sHRLVRyr2Ep9II/dQXwClXulBDfOIKWP5PD0Pqju47aR/9 ++SttU1TPBqT+BGP4HVst6ofiIIfnmQIDAQABAoICAApr8DiRTYIl0TX99UyQkr10 +HEhiJ4d3tQSE2lBegGcSrQPO5i+1V8EN2+2x4An/yw/NzuE0nazH5OaIDaZF0pcg +O/MBvUEEAhmQuYfWoBG+aWz/lJV+O1Yr6139J2nNdEizDASZmPQjq5hisjt9AKnV +09McAd34r0Zmz7l5gOPEjmx2Q0+b/XLwy0ISFypQrqFP/2rAzbkOcVuTO3b4w55v +swjl4kfwJq7SyhpudJhcSRD3QroHRG9SQDvSzl60BDM+/Bnb3VDRqGEzMGsqjOvz +AYI5StIaOrbqlGsYKrSxv04WoL3mER5j0x3fDD7KSgxySYiudyr8KQ5nx4hJD8dy +uwoZRjfxi1gLxQW7PfXuiDCIL9TLWeCbNE5kLhP7nc4Y2UFUQsJ8+F3kKiCXC0y2 +l7IDQzYALUgoToJou2uPkSO43z5X4dEXbVMhTH7wmurfqyJMpDrhVZ6sLHdlYOMO +5px8o7F7nW3adycTM4cII9BLO2cO7dWVhvOX+sSq3DGNTJKQBkVMbvOOcgFaoKvE +BSvMlmVP93gmp9vrN4Q830WGmwgjgeFd7B3DGLM/7fH3D0HPrAbjyq3V4LAzv+ZS +a09c8lJaZTSnDCPXYHBqJtVXUsyntPMSkFP/ko+NAD7U9pPoF4Kr0At0djZJ3PQI +4DVE4MkbLx4kAQFH7eiBAoIBAQD0ABOt/Oa24WFnP75cN5kmGlUr7WbpukcIJH1f +FwQrU3QzebIJH2gAD7DlmAZZK9jklE3yuwDBwbu4h0/lox/ow/x0sgJiKE4rtOCQ +QGM+DtpYVqVKtu0xMSA12w6rIQqfoS4ZO3fBRI3Vp9GzsiMVWLwprgfMJMOqOwdy +pwp8ko969GHyA4f4qAyXjbHgX3MmmpWvXBEq87xPsu6wJncDStSPXUSGz68RQvrG +7lTvLe32fYmcJvOLpdkTHFOSXWZTwHLKg/9mVpQ69U8y4XW5F2HjS9hH8LdEhYTY +TjSQ+BP6bY3gscgv6ZnbV7rtJmRXTpTwfu2NjkYb39XgA8GhAoIBAQDmc9cpzU3Z +ek0uYim0onyhsYNSqZypCEi1ulwuLQQS+0hQMnixZYZMNrmvNKp1lB0pVd3Uoq+r +3hAdRfFc3wX0mOvHAUxralewJ3gWz8gJeJRV0reOoAlIgekcb67NRKDYSIgKfKNT +kfMLwcXfI1NyeWATFmA7bT3vppdmMxr+7NPgqE8lZ6NZgtJBcXgwjBXXBDY45eC4 +WDxzEZTypwsLNjB2RigTAFu72By50+aFgr5dC+5ivi5cT3zThY4bxi4y4fCAi8tx +3bW+j2eGwN2xVTmNiImwJ5pd4j6MU7DvMDDbAlYKCgCYh+pPqbrfblpl4XN8M0El +3dahu/0w+1L5AoIBAQC0Af6CxGn9D2cJgehRJh9REeo13w2mbuuKYUkwxs0nnYjD +lQs4WbgjG3BY4YAFdyiq8ZJT+YwEZYW1C/Zwppia40OzX8QMYEnTSrzFDwtoNBL3 +SuzkovF38hMeSD6ZF2Y7/+//Y3Tk20t8DTPSyx/A1tbyNNbBz3mec6/58Kv23TUG +85XyspI0at3nTPymWCSenTrCxTxRIMGTVqFqyOXSQvlvztIIVt2D8ZMLAbcdQRgm +WdUUo1PXeYIc7C/ncgpUzUyihSD9gfAXUvSbx+NOqqZnxoOIB1ldLtejsIZtUOeE +QCCEXQQf/MmqHR3laQZrtqiAiFN+tDCazdZ+TqchAoIBAQCOXvKGeLenq88S4MIz +Pk8Q9cHTx8VpCB0jaEaaoGTr9SHvqb62maomW4jLnRXSHfcGGm45d2hgHYiHDTbX +0VC0k+TVxQ+ZLNGPmlKuhR9n+n5ppwyPqH8TtG53iufLGKLl+6lQEUA12Oz2TngY +KpLbHfEk2aYKEKs66wiNsvTymQ0stc5vpV9dZVzP4XIHTAQSGiAFdr8mJBz0WeBT +KK/B9XBrIVyQoKALpK/affbkdKl2gnB7wFKT5OeRJeltbzoLZVpXkAXYCsjFOYMT +1QMhEy3DKp8lnNHZPnS17N5aZsNICmFtQlCr6wvuu0Uf0+U1G9tk16Vf53U55hG9 +uQbBAoIBAQDDYFtI0g9mG7eCyt8lZ1YW/sNPLhr2Pw8KjvZ2SMSp1rIRb6DDShiF +ayFq7KIKJMgqm88Efapqx85yePKK/voaX4zL0Medr0HNT/jifbosNm/59FdFEul1 +mFYI6Nf9pTMRhuCyAtNbzZ3iZb9d1iUKKlNg7haGd+Z3C5K/CZ+7C+ovi5xKTvhp +VmEEybhILypySADSYnI6mE5dOf+kSwPdQpipLb84CrCwkJVgD3JH8tXiJR5CgWdg +Y5H0JGN4IaZpws3CdCFy6YPW9e3BjygnUxg1zIXJgJO4SNyPf6xckqnM/H27XvHc +cwaotmmDO7aug5eY2IhumKimQRSFGf7+ +-----END PRIVATE KEY----- diff --git a/ansible/roles/helm_install/files/vault/tls/client-cert.pem b/ansible/roles/helm_install/files/vault/tls/client-cert.pem new file mode 100644 index 0000000..ee7aec6 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/tls/client-cert.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIUU/FxrarBbSo7RaTjkegsZtiEEcQwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCS1IxDTALBgNVBAgMBEFTSUExDjAMBgNVBAcMBVNFT1VM +MQ0wCwYDVQQKDARFWEVNMQ4wDAYDVQQLDAVDTE9VRDENMAsGA1UEAwwERVhFTTAe +Fw0yMjEyMDYwNzA3MjJaFw0zMjEyMDMwNzA3MjJaMGAxCzAJBgNVBAYTAktSMQ0w +CwYDVQQIDARBU0lBMQ4wDAYDVQQHDAVTRU9VTDENMAsGA1UECgwERVhFTTEOMAwG 
+A1UECwwFQ0xPVUQxEzARBgNVBAMMClRFU1QtQUdFTlQwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCxCa4ZQy9G+sIKVIquGnkBkPfBI7AeocbslM+jnj17 +TqqJPMSOGVZGlCktKVqGd6LkuI5mf53IhYbx/mU85UdgQPYEI0JcfIkNejYybtd1 +t0+/jwqx3t8YR/xG1ShOwqY2tx0ieoW4aKJEk/JT/LlNKgQwcxdFt1ShwaXKo2Fc +HgfY9hIQDKcwtSn/Clz0VM0VuOAdFLiPypf4nDqHS+aKgjAt9yZaiZwhbMGWhTT4 +pR0hMsGiwwXw6tKEeWoVaAkg2wlY7ABZQRpgsqoNKXdTpkl6apryUJ3M/PvBce5J +yMErVg5Rb2s2wLpK0fakrxE7sgLVffydhICvLLr80Msq7zJw6kJPtr9I5b3U96wN +rrruioLJvXYlv1KX7jwEc+4FADCXvNf6geCemXTRJ7Sn1qFA82q6YfdcMQFZVlrR +VOzxphWoZnAoGDWv41OK6Y2NTsJA6rXeIv3D3TcmxMyba1ovcOWyR5pYTDQqkThV +q2nklI8x+h7o5N7hMWIyxYhZFQMWd1NxxZ12LISrk/vD3rTSipDwrO+hEkTZP40p +lNNwmshzHZ2R1/z7m1TIlVEjKNp+w0PMKH1TGmDHmT2sORCL2Hbo6AOgZi/3euPu ++5ipxrbkV6cJnJh4/wpipl7dmVDDy81fCeAAbmbvfB9PefgEfPRFlrVXuYuY0w1t +cwIDAQABo2gwZjAkBgNVHREEHTAbghlkZ2F0ZS5kZXYua3IuZGF0YXNha2VyLmlv +MB0GA1UdDgQWBBTIkFbBsCh4m99Lz3R4DpNncMAw/TAfBgNVHSMEGDAWgBTEPJx7 +89CPnkZ0otuFE7b8s+SzGjANBgkqhkiG9w0BAQsFAAOCAgEAx31IOe39f4n5J7/E +wFpegE49QcdOlzEMzMQWEFg71OmqYIzLVnQ8RblkHMLPdMIPTqrUG3GLqAKbBvNU +8/degg1XPq2OJseo8+cyVZkEamknVjNNK4/fOxYQ2BwiFW/G+e26El6Fjg/jEHeq +w8wZXoaSF8HlZ11Lynd6EStUBsKmY0Ld/pw4EGdOm5uFpLcwhYZaQ+aFS1fX6CxB +uThYxuMXKRKr78GyQUzeHKn5wzvZtlzmNdf0xB5/kiwSEpZicAZ+ZFKRe/C5BAIR +tDmBTIb3NeBoo2h3elbvLeIC5+aW9zWXSxUOOa4QFfcCcN4+K9SVh6DqgworEhFw +iWuCh2tOYJsWnoUyoOc3A5EdKXXZOU/xvjjKPM5OLEQ0B4vuvbQref59haMToaPP +LXEiE1GPJEgzR4Pem81fjZXmtMxI44YEUqg3iN7Q7urKYdqdWnVpIdECYy9nULfN +cRrJZIRTPYIhj2xr2I9JW3i7asiF3Ht3VVNl/YKYwSIv9fLPT03lbzfijcIYLVE+ +m0zzKbUOqA0S2yekzRswEF5H3QAekaix6ov81UmLM3iBPVXM+sZi7k0pZNp8bt4l +fxP1MypfQC6XvvBo74XBKj9rJSKUIhW+ZjydKGv1jQzRAUZtslYfrtXx3hDeTjaB +7DsclIKNo+8jKLC7ifCl2iK0UV0= +-----END CERTIFICATE----- diff --git a/ansible/roles/helm_install/files/vault/tls/client-key.pem b/ansible/roles/helm_install/files/vault/tls/client-key.pem new file mode 100644 index 0000000..3ca5773 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/tls/client-key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCxCa4ZQy9G+sIK +VIquGnkBkPfBI7AeocbslM+jnj17TqqJPMSOGVZGlCktKVqGd6LkuI5mf53IhYbx +/mU85UdgQPYEI0JcfIkNejYybtd1t0+/jwqx3t8YR/xG1ShOwqY2tx0ieoW4aKJE +k/JT/LlNKgQwcxdFt1ShwaXKo2FcHgfY9hIQDKcwtSn/Clz0VM0VuOAdFLiPypf4 +nDqHS+aKgjAt9yZaiZwhbMGWhTT4pR0hMsGiwwXw6tKEeWoVaAkg2wlY7ABZQRpg +sqoNKXdTpkl6apryUJ3M/PvBce5JyMErVg5Rb2s2wLpK0fakrxE7sgLVffydhICv +LLr80Msq7zJw6kJPtr9I5b3U96wNrrruioLJvXYlv1KX7jwEc+4FADCXvNf6geCe +mXTRJ7Sn1qFA82q6YfdcMQFZVlrRVOzxphWoZnAoGDWv41OK6Y2NTsJA6rXeIv3D +3TcmxMyba1ovcOWyR5pYTDQqkThVq2nklI8x+h7o5N7hMWIyxYhZFQMWd1NxxZ12 +LISrk/vD3rTSipDwrO+hEkTZP40plNNwmshzHZ2R1/z7m1TIlVEjKNp+w0PMKH1T +GmDHmT2sORCL2Hbo6AOgZi/3euPu+5ipxrbkV6cJnJh4/wpipl7dmVDDy81fCeAA +bmbvfB9PefgEfPRFlrVXuYuY0w1tcwIDAQABAoICAAdyPUjhuwjUvjmaAbU99s36 +j3knq542Nrw3rB4ZJ0Pa9LBOBoRli2vvimXUYfLa8FaHbrHf9z56Y0klZpOvnxds +1AwrgXyLXmZbOBFZ+SUB31BSz2PY2HBYgsNl7sqVRFFz1T1gguRVPlH9GQmwTQjy +VffFt3pesigVkYga5BrwRms5OlxDc/rH++q2wF8ke/XIb2C5wOOHZWn3BD0xk5JK +1ITyasIPHKqJPQE8PBSjOTzuuhdC9aqC7fTVeIFK/WtM2zFKx975LodXrHBeSJ8T +lO6Zskez3VdEYsBprtiaoYlVlseVFKDabVt9Rk758BMfMyBQqsHjU1hK3NiKXakw +Omb7+Jwo9AEq+9bToCdDbKyhqmgbZ6ow/ivLfzQCf4NhyhRt51KCS4CeWwqzxo5Q +eN6vl97d2xxzF9ur5hv1SlqJyOP1LtmyVf3U+7xhD/BsBVE8y9HmneeSnM/K9MFZ +cfdAPI0o2DJCK4hOtHYkf1ISIsZ06YZ+a7s18qPc439ArYM7Tu0GL3/X1C1dgLN/ +xcnwVvSnGfDyV1jJAMhWptenct0ubbl9BEmeML2dOp0XaKb6VCofHXcxVwlmvbYp +B7bhluFN32zw3poBbgWh3vP40px4CDXfZsp1XSoovephiEIo5oblvLTOzFHeRd+Q +zlzn3r8hnXcIb1pSp17hAoIBAQDXq+MpKqtpe2s4fdXTrTZ3ptuujgq490eLOKVR 
+JjPv83HsUvfYj4Hrju0X4BDy+8+P5fdXCgAFR/sT6Ydea1r3f0VYUsyuGMPQCaYt +y2fJrC/yFtLrsIGii9+aQhqDXBlImgMKp3xpBtB4ZIeRsqTeuGSjgEHCKV7DmJS3 +C3ZnvbsDhkaPxoS7iLjx8TEGzMBeLMpyXZsjFqJa9kmU+tg2ILpDMEp+fUjzahkE +y4YYVXAK3AjjBoOv/nrg/hyuKtBYLcfFfs8vfCA0/j1YMskt4mSAtthYz/kv4aSA +oE72xpVER9coq4oGklzqPjNOV+bs25r66x/AF4VuYVqCFrjTAoIBAQDSJGpV6PY1 +YcJfp8BoxMKLRalrKQHnB7R9V92/lk0tPBs+0oGSHW7q7ISdtMk7PtmS6lvRK4Qy +bj+Fi3eIyei+0lCsW3hLX+aQLSME1nDIpP/v5SARCFyB8us5rknluXoVcNfSOW/s +eVTOlVDI73BZxLfC6mKGn3TeD62az6wEscLgJfB4+PNv6Y0jK61J/CqadlHqVPRL +/I9uDO+R6GyiPsfGY2VJLrABD0YaDiFh49kxPf65REDIZzoJKfJ/DcKYUYX2G7n+ +TQJS8vUnAghrcjXgq3XbBYI6aLz7o9c8Pdxfv6GJNCWCBEQjDNuzNnN+Tfxcx7hD +UEKVnK73+ZThAoIBAQC7H5oCt9DtvnWzGAsUk+70NcPZWciX34UlmEpndDwP7ytj +HkGIlkrlNKcl1fUevbjDY4YM8NI/wspn5nDbTU4p+zURgxWv4DDVKeoTRa6RXmJj +K8ZpVEXF4JJrZvtWCeyt/2KwNc1N2um5649srJE75MT8z2U+r2JXVuWBQ+LB8+51 +CmXijR3RELlWwiwFeFVX4IjuXaUi9pp7TcFeD57Up+S0MwwPX2iSOfXnIScBqe+C +nJLhV1+era+z0wbTXrZSK7Zu9q2Qx6OyFvzmr3m54/nLa9mehSPxlVeDepPIrVgJ +XEOkN+GT/h2756fgS4hC7e8z0hlEM6RDtxguUsSdAoIBAQC7v49FzErVLM+V2PG+ +aTULa6TPOicZ+7dVWYCkYKv5Yz9QZJEhM6zatZBSuqgjh6aqbTq1ajTFPd0A+rdE +fmi0vIEZTOvwI/GCFOG5u8GuckS8V7DLpvXHoHpq4X3iglFCALzCdVC7LTgJDjzM +Gaj+exRXUEPQ1Xx7vfQ0YaDl3NAopZzw6DPPZBIWPzqm5GZH9LiBLX/KuA6Jm4Mb +kFYmqwKgDEP9G3pip/vQ4ZkPtZtiNGp1qjZbFmcyssiWnXujzQpr9R+6xaDy7WD8 +hgNxw+vBHE/C7GwKtXNLPHnbt8VBwpuXUwDD5f3edul/d4G+cfysBEBr27RxqThm +ExXhAoIBACu/PJGQae6MGOOszAyEQb4c8NKanC3TikXcTYali6veodbyAVv0BpkZ +Kbn5NSMZSQZBNdeSAOQAFNaTEh7jA9zFPtFZMpSG1fjx8dlC+ZTB8746wKunwbVS +yspLcC3U1AXChbF6EHRxYnWKpTQVYtn3npTjE95I9PK7dPvM9Qv74EiXhpOy7orN +hj1XTLP89uR70gLkCjWnMi48qoyMbKrnHJR/H7qZXIfkHUA+wrzJ0sMVmDvcXV+P +9oBJIUM0PGBmqLYbhG/mkiAuX+KJyHXcJY9YxRi9nyqptZPH+igQPC0pNaDtajm/ +2ePO3b8j2RJQp1ABqH7ZT0TVUwH/vWo= +-----END PRIVATE KEY----- diff --git a/ansible/roles/helm_install/files/vault/tls/client-req.pem b/ansible/roles/helm_install/files/vault/tls/client-req.pem new file mode 100644 index 0000000..61e62c2 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/tls/client-req.pem @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIEpTCCAo0CAQAwYDELMAkGA1UEBhMCS1IxDTALBgNVBAgMBEFTSUExDjAMBgNV +BAcMBVNFT1VMMQ0wCwYDVQQKDARFWEVNMQ4wDAYDVQQLDAVDTE9VRDETMBEGA1UE +AwwKVEVTVC1BR0VOVDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALEJ +rhlDL0b6wgpUiq4aeQGQ98EjsB6hxuyUz6OePXtOqok8xI4ZVkaUKS0pWoZ3ouS4 +jmZ/nciFhvH+ZTzlR2BA9gQjQlx8iQ16NjJu13W3T7+PCrHe3xhH/EbVKE7Cpja3 +HSJ6hbhookST8lP8uU0qBDBzF0W3VKHBpcqjYVweB9j2EhAMpzC1Kf8KXPRUzRW4 +4B0UuI/Kl/icOodL5oqCMC33JlqJnCFswZaFNPilHSEywaLDBfDq0oR5ahVoCSDb +CVjsAFlBGmCyqg0pd1OmSXpqmvJQncz8+8Fx7knIwStWDlFvazbAukrR9qSvETuy +AtV9/J2EgK8suvzQyyrvMnDqQk+2v0jlvdT3rA2uuu6Kgsm9diW/UpfuPARz7gUA +MJe81/qB4J6ZdNEntKfWoUDzarph91wxAVlWWtFU7PGmFahmcCgYNa/jU4rpjY1O +wkDqtd4i/cPdNybEzJtrWi9w5bJHmlhMNCqROFWraeSUjzH6Hujk3uExYjLFiFkV +AxZ3U3HFnXYshKuT+8PetNKKkPCs76ESRNk/jSmU03CayHMdnZHX/PubVMiVUSMo +2n7DQ8wofVMaYMeZPaw5EIvYdujoA6BmL/d64+77mKnGtuRXpwmcmHj/CmKmXt2Z +UMPLzV8J4ABuZu98H095+AR89EWWtVe5i5jTDW1zAgMBAAGgADANBgkqhkiG9w0B +AQsFAAOCAgEAo35RHmcSHDmQDdGNOprlWMeuNbajRsERAKKuETPVHmh6QikNBD2R +l971+Feva9W0xtxNrWoly0auHelHtoRt7sBEfy5dGkVmcQOGfrUFZKOeZFjXg32d +dJdYnjur4nYUbD8sufo9711RAJz75fYUAa88pNhdrgNb5jpdBOrYVp4Xzo582wSt +VKKBYpYIy9yK1sAfUdk+yyr4XfM0GJseK5Fbbb4numqhFNlwWH26QkkbjKGZz4BM +AhC/2qaG0b7Dm691evSnPWoS++sQTtc70d/4WJp9NoxQie4wCabqgx+sG1yN9JRY +YHirPRPBBtkK9Pt6Vj99ahKWjqlknAHgiBF6CpSc/UVCqUgV1/aKRk1YSkHJpuZi +2H6w+TjF0o6vIDh2ayKdE/nib7TIbqIL1canxymePbqLN0dpIQYaUpH91lCP2rSy +Fng1jChMmN/pLi3ucx+3CTWSzrAve/2F1dC52HsAtVrKUDeg0WRk5onoxKHll0X8 
+H1lgHzBfSw+25kdAkOhwUBNuOXHig7Qks58JVsMmfRe26GFbTrHz3D47d8p9U/pP
+muhGiOekl6/sg3rLLfZraGw9pT1kqgGKTBaAR23BLLLyTnhZLUZzL0Hl88PObrlj
+UXDGbmN6knc8vpnYFJN+Gm3zFwJlnqndwPUhu8UjLgdpI4ybFMOK+2U=
+-----END CERTIFICATE REQUEST-----
diff --git a/ansible/roles/helm_install/files/vault/tls/ext.conf b/ansible/roles/helm_install/files/vault/tls/ext.conf
new file mode 100644
index 0000000..24dd565
--- /dev/null
+++ b/ansible/roles/helm_install/files/vault/tls/ext.conf
@@ -0,0 +1 @@
+subjectAltName=DNS:dgate.dev.kr.datasaker.io
diff --git a/ansible/roles/helm_install/files/vault/tls/generator.sh b/ansible/roles/helm_install/files/vault/tls/generator.sh
new file mode 100755
index 0000000..4f83989
--- /dev/null
+++ b/ansible/roles/helm_install/files/vault/tls/generator.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+
+########################### generator.sh ################################
+
+echo "Generating TLS files..."
+rm -f *.pem
+
+# Generate the CA's private key and self-signed certificate.
+# -x509   : output a self-signed certificate instead of a certificate request
+# -newkey : key generation algorithm and size
+# -nodes  : do not encrypt the CA private key
+# -days   : certificate validity period in days
+# -keyout : write the private key to the given file name
+# -out    : write the certificate to the given file name
+# -subj   : subject; if omitted, the user info must be entered interactively when the CSR is generated
+# - C  : country name
+# - ST : state or province name
+# - L  : city name
+# - O  : organization
+# - OU : organizational unit
+# - CN : Common Name
+
+echo "Generating CA private key..."
+openssl req -x509 -newkey rsa:4096 -nodes -days 3650 -keyout ca-key.pem -out ca-cert.pem -subj "/C=KR/ST=ASIA/L=SEOUL/O=EXEM/OU=CLOUD/CN=EXEM"
+
+# Print the resulting CA certificate.
+# echo "CA's self-signed certificate"
+# openssl x509 -in ca-cert.pem -noout -text
+
+# Generate the server's private key and CSR.
+# -nodes : do not encrypt the private key
+echo "Generating server private key..."
+openssl req -newkey rsa:4096 -nodes -keyout server-key.pem -out server-req.pem -subj "/C=KR/ST=ASIA/L=SEOUL/O=EXEM/OU=CLOUD/CN=DATAGATE"
+
+
+# In a Kubernetes environment, the Kubernetes Service name can be registered as the DNS entry.
+# To register the subjectAltName with an IP instead, use the form IP:0.0.0.0.
+#cat > ext.conf <<-EOF
+#subjectAltName = @alt_names
+#[alt_names]
+#DNS.1=sam-datagate
+#DNS.2=sam-datagate-develop
+#DNS.3=sam-datagate-cloud-1675
+#DNS.4=10.10.34.129
+#EOF
+
+cat > ext.conf <<-EOF
+subjectAltName=DNS:dgate.dev.kr.datasaker.io
+EOF
+
+# Sign the server certificate request (CSR).
+echo "Generating server certificate..."
+openssl x509 -req -in server-req.pem -CA ca-cert.pem -CAkey ca-key.pem -CAcreateserial -out server-cert.pem -extfile ext.conf
+
+# Print the resulting server certificate.
+# echo "Server's signed certificate"
+# openssl x509 -in server-cert.pem -noout -text
+
+# Verify the server certificate against the CA.
+echo "Verifying certificate"
+openssl verify -CAfile ca-cert.pem server-cert.pem
+
+# Generate the client's private key and certificate signing request (CSR).
+echo "Generating client private key..."
+openssl req -newkey rsa:4096 -nodes -keyout client-key.pem -out client-req.pem -subj "/C=KR/ST=ASIA/L=SEOUL/O=EXEM/OU=CLOUD/CN=TEST-AGENT"
+
+# Sign the client certificate request (CSR).
+echo "Generating client certificate..."
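+# Note: the client CSR below is signed with the same ext.conf as the server,
+# so the client certificate carries the SAN DNS:dgate.dev.kr.datasaker.io,
+# and -days 3650 keeps it valid for roughly ten years.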
+openssl x509 -req -in client-req.pem -days 3650 -CA ca-cert.pem -CAkey ca-key.pem -CAcreateserial -out client-cert.pem -extfile ext.conf
+
+# Print the resulting client certificate.
+# echo "Client's signed certificate"
+# openssl x509 -in client-cert.pem -noout -text
+
+NAMESPACE="dsk-middle"
+VAULT_NAME="vault-0"
+
+CA_CERT_NAME="ca-cert.pem"
+CLIENT_CERT_NAME="client-cert.pem"
+CLIENT_KEY_NAME="client-key.pem"
+kubectl exec -it -n "$NAMESPACE" "$VAULT_NAME" -- \
+vault kv put -mount=tls client \
+ca_cert=$(openssl base64 -A -in "$CA_CERT_NAME") \
+cert=$(openssl base64 -A -in "$CLIENT_CERT_NAME") \
+key=$(openssl base64 -A -in "$CLIENT_KEY_NAME")
+
+
+SERVER_CERT_NAME="server-cert.pem"
+SERVER_KEY_NAME="server-key.pem"
+kubectl exec -it -n "$NAMESPACE" "$VAULT_NAME" -- \
+vault kv put -mount=tls server \
+ca_cert=$(openssl base64 -A -in "$CA_CERT_NAME") \
+cert=$(openssl base64 -A -in "$SERVER_CERT_NAME") \
+key=$(openssl base64 -A -in "$SERVER_KEY_NAME")
diff --git a/ansible/roles/helm_install/files/vault/tls/server-cert.pem b/ansible/roles/helm_install/files/vault/tls/server-cert.pem
new file mode 100644
index 0000000..594542d
--- /dev/null
+++ b/ansible/roles/helm_install/files/vault/tls/server-cert.pem
@@ -0,0 +1,33 @@
+-----BEGIN CERTIFICATE-----
+MIIFrjCCA5agAwIBAgIUeYr9DLb76Ps4kU+TjPUqWGi++/EwDQYJKoZIhvcNAQEL
+BQAwWjELMAkGA1UEBhMCS1IxDTALBgNVBAgMBEFTSUExDjAMBgNVBAcMBVNFT1VM
+MQ0wCwYDVQQKDARFWEVNMQ4wDAYDVQQLDAVDTE9VRDENMAsGA1UEAwwERVhFTTAe
+Fw0yMjEyMDYwNzA3MjBaFw0yMzAxMDUwNzA3MjBaMF4xCzAJBgNVBAYTAktSMQ0w
+CwYDVQQIDARBU0lBMQ4wDAYDVQQHDAVTRU9VTDENMAsGA1UECgwERVhFTTEOMAwG
+A1UECwwFQ0xPVUQxETAPBgNVBAMMCERBVEFHQVRFMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAv7EV2uwFSxTZMK5EG+QhuyZivAEjaRSrxqYbbqARWGVb
+wgg6fseiwudn+4rgQTSr+FIH5xEbQxD5WncTPE4L8gwYTZ+eNykii4d82xqQUeEN
+PeQ2CWBeYJfgRdN7AflU+BwbOjeErXdgxJybABnVoXdxusfo0freVSYseiT/ZLaD
+0u7lK6srbY1vUwxorgQRhi0DD2Hn4SrXQv2K6y9djW2YusPOSHQcd7yK+J+30N9k
+vfe7sdKiinHM65lezwPyx34PY+2RqoqqLDa1MpBpialxQuiGpz7UexNuiNFJTZbK
+YmGPiMPnTMbjyqyU4tmMUhSKPFa1cAam/nDtVbzS1u9LY3Q8FMUnQV8TeBQ0cvqZ
+rp0bvI3Z6ogpJeMTDOr/obLuFu7bhPnsGLEmL6BIZPeiQuCzReq0TSHWgoMWGWAn
+kZoFpGgWVjx9TifoCPgvx1ULsTbPmXBs9b9NcFyLtlAuCrrnCK5zhbBVCGtAhrhE
+TapQuaFK7glsncSRJMmivfVXcdnWlb6KpKmwzJl2XdMf/e+cxDxwK03U+NkInfuY
+U3yo3mfL7ZZfjShEtOoKSqeWIjXPmp3qP1Ih5EMN13lBdkybV8t9ll33zpHfx57I
+3B9deSz1pTnCnQ/tX29ENISs9bac0aPgCgecjO/UWkDSfwRfwbgKEAY0g+2HYxkC
+AwEAAaNoMGYwJAYDVR0RBB0wG4IZZGdhdGUuZGV2LmtyLmRhdGFzYWtlci5pbzAd
+BgNVHQ4EFgQU3HFs3DszdXhH2F978fk6+G4NixcwHwYDVR0jBBgwFoAUxDyce/PQ
+j55GdKLbhRO2/LPksxowDQYJKoZIhvcNAQELBQADggIBAL4MoIJvwk9S512WG2kP
+mCyDt1WIej1N5hioGH41XLTYBgCRZR53IiFKE5GuL0aDd9CKOPptMXDxpJJuzQBx
+0FCQl5qrDpryGcRad+5ljcTcCyQTv/eeoJqR8YXu0N8/ToJ2412nc042YJ+Yueh+
+gy8bOtKnh5QlG1/wC2BNpE7W6DBglz10KfiXwBUz1aDhb+GX2HSPLBko/MvVFdy/
+jjVf7eJrXymPnWtQxNRurzfpT31xv2slXzsNXu53/jMeTF0ScjRltVos5cmjB+2r
+7IIVhlQ767ScTCb+OrAfEJ7wCEcYVYN3+VHAYkSYYdg+slXr3lxyYgaeeQWc7zNw
+5ClzNOP/juAfwC6JiCpJxR3l3pqQaaEbzWeAIUbKRaSlECV93lc1sraPVjEiAxog
+2VlmtLD3MnyC8FXomIZNaZdh68cmiNsPehecWb10qrewiLYgFJQhaRSuQJ35CO0Y
+u9E3JSZpVOJB6OoDNkV6AMWvDaimDtCbX8GwwNwoc0/rNc5Z6qZdjBsTfzUb/opc
+XMsj/yuGZAaBe5Slqz+5FQKUMd4RrDVnZzgqriIB5jlXw8sRn+iiC/UYjQfyPiwk
+lHccKWsb9CL2y0gpot+jg0e+A2Y3UO6v4opGXoSteaEj6Iqn+wINE0RJ+XTQKKao
+VzjxmZAfIq9hRA6+IO3MschG
+-----END CERTIFICATE-----
diff --git a/ansible/roles/helm_install/files/vault/tls/server-key.pem b/ansible/roles/helm_install/files/vault/tls/server-key.pem
new file mode 100644
index 0000000..d05ad90
--- /dev/null
+++ b/ansible/roles/helm_install/files/vault/tls/server-key.pem
@@ -0,0
+1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQC/sRXa7AVLFNkw +rkQb5CG7JmK8ASNpFKvGphtuoBFYZVvCCDp+x6LC52f7iuBBNKv4UgfnERtDEPla +dxM8TgvyDBhNn543KSKLh3zbGpBR4Q095DYJYF5gl+BF03sB+VT4HBs6N4Std2DE +nJsAGdWhd3G6x+jR+t5VJix6JP9ktoPS7uUrqyttjW9TDGiuBBGGLQMPYefhKtdC +/YrrL12NbZi6w85IdBx3vIr4n7fQ32S997ux0qKKcczrmV7PA/LHfg9j7ZGqiqos +NrUykGmJqXFC6IanPtR7E26I0UlNlspiYY+Iw+dMxuPKrJTi2YxSFIo8VrVwBqb+ +cO1VvNLW70tjdDwUxSdBXxN4FDRy+pmunRu8jdnqiCkl4xMM6v+hsu4W7tuE+ewY +sSYvoEhk96JC4LNF6rRNIdaCgxYZYCeRmgWkaBZWPH1OJ+gI+C/HVQuxNs+ZcGz1 +v01wXIu2UC4KuucIrnOFsFUIa0CGuERNqlC5oUruCWydxJEkyaK99Vdx2daVvoqk +qbDMmXZd0x/975zEPHArTdT42Qid+5hTfKjeZ8vtll+NKES06gpKp5YiNc+aneo/ +UiHkQw3XeUF2TJtXy32WXffOkd/HnsjcH115LPWlOcKdD+1fb0Q0hKz1tpzRo+AK +B5yM79RaQNJ/BF/BuAoQBjSD7YdjGQIDAQABAoICABaR/r1Gof7rpwEE288e5tvW +0rRJPAag4PLkVwGJBlHOqWWou153GhJx1bv355y123PDENwjlv6oDnwY3iJxHBX1 +V504KJRdGwyruMdBkvGZGoqJhtVGzAX6i/Ucx/R9C4SlJo2NwOj9z96Gg/eYkx9O +ZY01AZRLwgbrljwRhCKzHMVmzIP+RQBCsXNpWB/5KakPPrd+cyN/fFYObV9wtcaz +u1JuQKkmRr3QbNqGT8nfq8h0VVzELNA5QwQQjGm6kMjtbll5PPgEGLJZBK+98jc7 +xLK4lzY0/ya4rICeh8+DbDP2QcS8ME1Jc8PG20b0j4GQmpAtM3LLzbjSyhcYwwhI +cbWzhlxcy7AmIQ9kdd6aodI8cqmCcf85xrLV4SFlFWHjul9uoFIPVQKhcbNu9rp+ +OOLpXHuA3KTG2I+HBGU6rmkZ0h3hhnzK+KY/ecg4ZnqVaHdbrnyuua/U1lSG+T2/ +RBWr0GFOHqzW/STOe5k0joYH7hVncymDw9DrRHSjeYEZjMgadBOtH+zXtL5Slfzb +OMIgkrcOJY4l4JNgg2ts+fPhXAbB7RUDFPUsW9mi79pk9JDn8MNSk3dPI9T3ZFG7 +b9FG69HfK2OsnWhjQexgAVgsdyRpgMem8z/rSKw1aMwXvFGne7o/N7YaVO3w0/PA +XY0GjI8YiE8Idtkd4HRBAoIBAQDUloMgTFcv/PSHQAg5/zmfN+eNQZE2Zwft1zZD +JsYDU1kXB4iaheqsfBrHagVxyiBZaFwPGEOuS5S+6ODSB+zaiGZeZ+yj8XcYOQNj +8y693ZFkqIaliAV7sNoSwsESvOGFeWCKWnx0If6uLp6170sQ1nST4TnSHlatgy1u ++k+HBgmQChPvPvp8ov8zS59TzHEI1qfsd1Jq6fS4I6SUA6bYLtnifatAWRuag/A7 +X5jAU83+jpdR/89A9k2DiowrW5Kp214/Ac66J7NhIcdiMl1EDw8tPxHv5OlOYEre +z8NSDwJMIqcZlBFskQv3YGaJLCuOzkz5QIpSyFrS55t8OLYJAoIBAQDm1i4IuabA +rGZ+cZnm31BDw2r9k2gy45PEtFkSKht4jvlMhOXpyEjq4weaEbqW/b84agB9D5Nv +NnRcLrF7DEZUgCPo86LYHsG7mXaaWmnKGQywVxCehZZ4ZZFH6iK2hANFS528TL88 +QoiHbw7NgWoYRL2NqkJEbm31Th7vYQ70eoxAWTSa2Tk/9fI1xCQni+tjzZRForHv +XLNgki91UDW2CS/5nkQGhukAlh47zXB4KDO/G/M+ZGmiCBpC3gE29Hd6PDgcByeZ +FOaWSY/uS2CA65dfloUTQfYrFagMos9zYaKJNbZ59HCQJ4T0SIx1F4isf8uG/z8w +U/xFK2Gb+AiRAoIBABYitfSe/lUY2b1bhX/Ee5OAlrO2qBAqGkFBr3y+hM/D1fr3 +5dlxKEZdhcxN1dNLYKLuoudPd3ymkg5u4Tn9aUQ3/7DE0XcgpW34QcoegDd04rbA +dYS27Rs8Bvj9Hm19DV6AjPTbBypwf7xTTmNTT1WpfuoHAaZHUm8uwYRUvAMrNcOx +yPGYdSyVLE2DCkwHdZ2d/E46/nHzem/SBkkDOFQ6TKWwwCwrGxyzsEf+ZEFieIiR +AGChaksxWlDgFEhPeK4HEYEuBwCLMbpjcgdfhyaXCtWDQ7Le+wwWKrWYnJXmyi5z +TUL0j/vl/oD7oLGXz9FG2ez6M2z7P7nGNAJzRUECggEASgbbom6vHFDtWZVizphE +7EZKFGHmY5N5+vX/LoUUZDgtmK1x8eQKBlGiSXVwCiX74cKUv4Z0OJEVBc7vmc9Y +6pZhbb5cNcS/SkMvttZR4L7hw9dX/A5JL/kaex6J3VA+oEVco1tVZKTNlek2rQ7Y +kEnTJBA6yilD2AwifY/5tMtsGOLCrPoGkw1zjGyHT6teZJYz+5TuTR3EZK7cy1la +sDMIEJwBoro3FLoPngwHCnGSDrKO8i5Pdef1TAZYC2CgxDF1qP9eYohCXmXe44OG +wjFLTRs/N+rKYAzE5LB9qLnh3vC7wSZzxrb1u0VczdwrN26QPY2znPMCDsiXt+X2 +EQKCAQBQj8LEoX+FVdTtFw7mMtcrpTs9JhWzA4emQSdEWxSLeXalAK/HM5xvF8kq +OPN4wVZYbhJyFCBxQAo47FB53WrcUKc/01E5RRsNALAbRszCAjMQ5X50TrkQCS+X +VeXCS76KnpH9yDfBxYis4eFtH6CvRHO572MpbJzjp4n4ZTVYws02BVVF6Jynmc66 +LDCw3lHaK+K0A5qX4MPTTXwELHMy4bJsWrI2r4PuMygJ8lrRxg3/YlOq9ZEegvBq +Dlv8nLIYYCRY8TRGVhtwQtEuuqrV+phFrkOwp/46UCsr7U2pTphNfebhfL5R1l2h +eAT5gUCcf6OjqDgIpD32xijmzp+r +-----END PRIVATE KEY----- diff --git a/ansible/roles/helm_install/files/vault/tls/server-req.pem b/ansible/roles/helm_install/files/vault/tls/server-req.pem new file mode 100644 index 0000000..239a981 --- /dev/null +++ 
b/ansible/roles/helm_install/files/vault/tls/server-req.pem @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIEozCCAosCAQAwXjELMAkGA1UEBhMCS1IxDTALBgNVBAgMBEFTSUExDjAMBgNV +BAcMBVNFT1VMMQ0wCwYDVQQKDARFWEVNMQ4wDAYDVQQLDAVDTE9VRDERMA8GA1UE +AwwIREFUQUdBVEUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC/sRXa +7AVLFNkwrkQb5CG7JmK8ASNpFKvGphtuoBFYZVvCCDp+x6LC52f7iuBBNKv4Ugfn +ERtDEPladxM8TgvyDBhNn543KSKLh3zbGpBR4Q095DYJYF5gl+BF03sB+VT4HBs6 +N4Std2DEnJsAGdWhd3G6x+jR+t5VJix6JP9ktoPS7uUrqyttjW9TDGiuBBGGLQMP +YefhKtdC/YrrL12NbZi6w85IdBx3vIr4n7fQ32S997ux0qKKcczrmV7PA/LHfg9j +7ZGqiqosNrUykGmJqXFC6IanPtR7E26I0UlNlspiYY+Iw+dMxuPKrJTi2YxSFIo8 +VrVwBqb+cO1VvNLW70tjdDwUxSdBXxN4FDRy+pmunRu8jdnqiCkl4xMM6v+hsu4W +7tuE+ewYsSYvoEhk96JC4LNF6rRNIdaCgxYZYCeRmgWkaBZWPH1OJ+gI+C/HVQux +Ns+ZcGz1v01wXIu2UC4KuucIrnOFsFUIa0CGuERNqlC5oUruCWydxJEkyaK99Vdx +2daVvoqkqbDMmXZd0x/975zEPHArTdT42Qid+5hTfKjeZ8vtll+NKES06gpKp5Yi +Nc+aneo/UiHkQw3XeUF2TJtXy32WXffOkd/HnsjcH115LPWlOcKdD+1fb0Q0hKz1 +tpzRo+AKB5yM79RaQNJ/BF/BuAoQBjSD7YdjGQIDAQABoAAwDQYJKoZIhvcNAQEL +BQADggIBAEyXArw9/0KHqS2WWpbd6/3LyrRISPE+WSH9cErOmiU9caDxbQsRLiW6 +I0G6MoJxkScsNy7i3vbQYIb5Ur+jqcC6zKZXevL9fVmYugrlR2LC9pPUVSD+brol +ooUGdjVNtEucNzCLgKg11WYp8zgt1E06mk5FYimCFWFA84ZDcFndBWpoExuTSLLc +dyQfmPQ40RAyT3qD55d0J+IiKhfWBq1blTY5uHaEp7Ok14ukSK5baA76tnpm89vU +eWM+TpaQUqYCcGT54xbcy6gS3F4slpeHBK0Lq1H9nKl0+GZWf9q9OsQNpVTW+q/y +ukBiEYHxx81LxCHrbOcRjCv9NGMVPiMGHn3wx02BIswtzp9HNeNBzno48DhBpJgt +R7tGcF102BA5P0winooqiCcDEeHC6c+MNWBIxT9dT6+jJNBLKTJuYND55BDzoeJX +O6brWipR//OHlcFDKxf+ZyukGuV+geG+7qpjc8RKqVhK2+Z1rioY/toIRuUTvkIO +KS6BCAlpr3Mavx484FUmZe9K8X3KmS1i/ItKBV5RhNwip9wOFdyQ8BmSAos0WyL8 +EBcNj2u2tUTQs+w5A6LNI+HVYeFhXgwJH4yOilb7iYhcig7lPaqCFR1RIARqUW4G +8AYasXX8bMbghDRFlIOdOdEMI/iaEC2lOfa4Jo3hKuV4YUqDUh2S +-----END CERTIFICATE REQUEST----- diff --git a/ansible/roles/helm_install/files/vault/values.openshift.yaml b/ansible/roles/helm_install/files/vault/values.openshift.yaml new file mode 100644 index 0000000..ee00563 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/values.openshift.yaml @@ -0,0 +1,18 @@ +# These overrides are appropriate defaults for deploying this chart on OpenShift + +global: + openshift: true + +injector: + image: + repository: "registry.connect.redhat.com/hashicorp/vault-k8s" + tag: "1.0.1-ubi" + + agentImage: + repository: "registry.connect.redhat.com/hashicorp/vault" + tag: "1.12.0-ubi" + +server: + image: + repository: "registry.connect.redhat.com/hashicorp/vault" + tag: "1.12.0-ubi" diff --git a/ansible/roles/helm_install/files/vault/values.schema.json b/ansible/roles/helm_install/files/vault/values.schema.json new file mode 100644 index 0000000..676efb7 --- /dev/null +++ b/ansible/roles/helm_install/files/vault/values.schema.json @@ -0,0 +1,1030 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "csi": { + "type": "object", + "properties": { + "daemonSet": { + "type": "object", + "properties": { + "annotations": { + "type": [ + "object", + "string" + ] + }, + "extraLabels": { + "type": "object" + }, + "kubeletRootDir": { + "type": "string" + }, + "providersDir": { + "type": "string" + }, + "securityContext": { + "type": "object", + "properties": { + "container": { + "type": [ + "object", + "string" + ] + }, + "pod": { + "type": [ + "object", + "string" + ] + } + } + }, + "updateStrategy": { + "type": "object", + "properties": { + "maxUnavailable": { + "type": "string" + }, + "type": { + "type": "string" + } + } + } + } + }, + "debug": { + "type": "boolean" + }, + "enabled": { + "type": [ + 
"boolean", + "string" + ] + }, + "extraArgs": { + "type": "array" + }, + "image": { + "type": "object", + "properties": { + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "livenessProbe": { + "type": "object", + "properties": { + "failureThreshold": { + "type": "integer" + }, + "initialDelaySeconds": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "successThreshold": { + "type": "integer" + }, + "timeoutSeconds": { + "type": "integer" + } + } + }, + "pod": { + "type": "object", + "properties": { + "annotations": { + "type": [ + "object", + "string" + ] + }, + "extraLabels": { + "type": "object" + }, + "tolerations": { + "type": [ + "null", + "array", + "string" + ] + } + } + }, + "priorityClassName": { + "type": "string" + }, + "readinessProbe": { + "type": "object", + "properties": { + "failureThreshold": { + "type": "integer" + }, + "initialDelaySeconds": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "successThreshold": { + "type": "integer" + }, + "timeoutSeconds": { + "type": "integer" + } + } + }, + "resources": { + "type": "object" + }, + "serviceAccount": { + "type": "object", + "properties": { + "annotations": { + "type": [ + "object", + "string" + ] + }, + "extraLabels": { + "type": "object" + } + } + }, + "volumeMounts": { + "type": [ + "null", + "array" + ] + }, + "volumes": { + "type": [ + "null", + "array" + ] + } + } + }, + "global": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "externalVaultAddr": { + "type": "string" + }, + "imagePullSecrets": { + "type": "array" + }, + "openshift": { + "type": "boolean" + }, + "psp": { + "type": "object", + "properties": { + "annotations": { + "type": [ + "object", + "string" + ] + }, + "enable": { + "type": "boolean" + } + } + }, + "tlsDisable": { + "type": "boolean" + } + } + }, + "injector": { + "type": "object", + "properties": { + "affinity": { + "type": [ + "object", + "string" + ] + }, + "agentDefaults": { + "type": "object", + "properties": { + "cpuLimit": { + "type": "string" + }, + "cpuRequest": { + "type": "string" + }, + "memLimit": { + "type": "string" + }, + "memRequest": { + "type": "string" + }, + "template": { + "type": "string" + }, + "templateConfig": { + "type": "object", + "properties": { + "exitOnRetryFailure": { + "type": "boolean" + }, + "staticSecretRenderInterval": { + "type": "string" + } + } + } + } + }, + "agentImage": { + "type": "object", + "properties": { + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "annotations": { + "type": [ + "object", + "string" + ] + }, + "authPath": { + "type": "string" + }, + "certs": { + "type": "object", + "properties": { + "caBundle": { + "type": "string" + }, + "certName": { + "type": "string" + }, + "keyName": { + "type": "string" + }, + "secretName": { + "type": [ + "null", + "string" + ] + } + } + }, + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "externalVaultAddr": { + "type": "string" + }, + "extraEnvironmentVars": { + "type": "object" + }, + "extraLabels": { + "type": "object" + }, + "failurePolicy": { + "type": "string" + }, + "hostNetwork": { + "type": "boolean" + }, + "image": { + "type": "object", + "properties": { + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "leaderElector": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + 
}, + "logFormat": { + "type": "string" + }, + "logLevel": { + "type": "string" + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "namespaceSelector": { + "type": "object" + }, + "nodeSelector": { + "type": [ + "null", + "object", + "string" + ] + }, + "objectSelector": { + "type": [ + "object", + "string" + ] + }, + "podDisruptionBudget": { + "type": "object" + }, + "port": { + "type": "integer" + }, + "priorityClassName": { + "type": "string" + }, + "replicas": { + "type": "integer" + }, + "resources": { + "type": "object" + }, + "revokeOnShutdown": { + "type": "boolean" + }, + "securityContext": { + "type": "object", + "properties": { + "container": { + "type": [ + "object", + "string" + ] + }, + "pod": { + "type": [ + "object", + "string" + ] + } + } + }, + "service": { + "type": "object", + "properties": { + "annotations": { + "type": [ + "object", + "string" + ] + } + } + }, + "serviceAccount": { + "type": "object", + "properties": { + "annotations": { + "type": [ + "object", + "string" + ] + } + } + }, + "strategy": { + "type": [ + "object", + "string" + ] + }, + "tolerations": { + "type": [ + "null", + "array", + "string" + ] + }, + "topologySpreadConstraints": { + "type": [ + "null", + "array", + "string" + ] + }, + "webhook": { + "type": "object", + "properties": { + "annotations": { + "type": [ + "object", + "string" + ] + }, + "failurePolicy": { + "type": "string" + }, + "matchPolicy": { + "type": "string" + }, + "namespaceSelector": { + "type": "object" + }, + "objectSelector": { + "type": [ + "object", + "string" + ] + }, + "timeoutSeconds": { + "type": "integer" + } + } + }, + "webhookAnnotations": { + "type": [ + "object", + "string" + ] + } + } + }, + "server": { + "type": "object", + "properties": { + "affinity": { + "type": [ + "object", + "string" + ] + }, + "annotations": { + "type": [ + "object", + "string" + ] + }, + "auditStorage": { + "type": "object", + "properties": { + "accessMode": { + "type": "string" + }, + "annotations": { + "type": [ + "object", + "string" + ] + }, + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "mountPath": { + "type": "string" + }, + "size": { + "type": "string" + }, + "storageClass": { + "type": [ + "null", + "string" + ] + } + } + }, + "authDelegator": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "dataStorage": { + "type": "object", + "properties": { + "accessMode": { + "type": "string" + }, + "annotations": { + "type": [ + "object", + "string" + ] + }, + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "mountPath": { + "type": "string" + }, + "size": { + "type": "string" + }, + "storageClass": { + "type": [ + "null", + "string" + ] + } + } + }, + "dev": { + "type": "object", + "properties": { + "devRootToken": { + "type": "string" + }, + "enabled": { + "type": "boolean" + } + } + }, + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "enterpriseLicense": { + "type": "object", + "properties": { + "secretKey": { + "type": "string" + }, + "secretName": { + "type": "string" + } + } + }, + "extraArgs": { + "type": "string" + }, + "extraContainers": { + "type": [ + "null", + "array" + ] + }, + "extraEnvironmentVars": { + "type": "object" + }, + "extraInitContainers": { + "type": [ + "null", + "array" + ] + }, + "extraLabels": { + "type": "object" + }, + "extraSecretEnvironmentVars": { + "type": "array" + }, + "extraVolumes": { + "type": "array" + }, + "ha": { + "type": "object", + "properties": { + 
"apiAddr": { + "type": [ + "null", + "string" + ] + }, + "clusterAddr": { + "type": [ + "null", + "string" + ] + }, + "config": { + "type": [ + "string", + "object" + ] + }, + "disruptionBudget": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "maxUnavailable": { + "type": [ + "null", + "integer" + ] + } + } + }, + "enabled": { + "type": "boolean" + }, + "raft": { + "type": "object", + "properties": { + "config": { + "type": [ + "string", + "object" + ] + }, + "enabled": { + "type": "boolean" + }, + "setNodeId": { + "type": "boolean" + } + } + }, + "replicas": { + "type": "integer" + } + } + }, + "image": { + "type": "object", + "properties": { + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "ingress": { + "type": "object", + "properties": { + "activeService": { + "type": "boolean" + }, + "annotations": { + "type": [ + "object", + "string" + ] + }, + "enabled": { + "type": "boolean" + }, + "extraPaths": { + "type": "array" + }, + "hosts": { + "type": "array", + "items": { + "type": "object", + "properties": { + "host": { + "type": "string" + }, + "paths": { + "type": "array" + } + } + } + }, + "ingressClassName": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "pathType": { + "type": "string" + }, + "tls": { + "type": "array" + } + } + }, + "livenessProbe": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "failureThreshold": { + "type": "integer" + }, + "initialDelaySeconds": { + "type": "integer" + }, + "path": { + "type": "string" + }, + "periodSeconds": { + "type": "integer" + }, + "successThreshold": { + "type": "integer" + }, + "timeoutSeconds": { + "type": "integer" + } + } + }, + "logFormat": { + "type": "string" + }, + "logLevel": { + "type": "string" + }, + "networkPolicy": { + "type": "object", + "properties": { + "egress": { + "type": "array" + }, + "enabled": { + "type": "boolean" + } + } + }, + "nodeSelector": { + "type": [ + "null", + "object", + "string" + ] + }, + "postStart": { + "type": "array" + }, + "preStopSleepSeconds": { + "type": "integer" + }, + "priorityClassName": { + "type": "string" + }, + "readinessProbe": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "failureThreshold": { + "type": "integer" + }, + "initialDelaySeconds": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "successThreshold": { + "type": "integer" + }, + "timeoutSeconds": { + "type": "integer" + } + } + }, + "resources": { + "type": "object" + }, + "route": { + "type": "object", + "properties": { + "activeService": { + "type": "boolean" + }, + "annotations": { + "type": [ + "object", + "string" + ] + }, + "enabled": { + "type": "boolean" + }, + "host": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "tls": { + "type": "object" + } + } + }, + "service": { + "type": "object", + "properties": { + "annotations": { + "type": [ + "object", + "string" + ] + }, + "enabled": { + "type": "boolean" + }, + "externalTrafficPolicy": { + "type": "string" + }, + "port": { + "type": "integer" + }, + "publishNotReadyAddresses": { + "type": "boolean" + }, + "targetPort": { + "type": "integer" + }, + "nodePort": { + "type": "integer" + }, + "activeNodePort": { + "type": "integer" + }, + "standbyNodePort": { + "type": "integer" + } + } + }, + "serviceAccount": { + "type": "object", + "properties": { + "annotations": { + "type": [ + "object", + "string" + ] + }, + "create": { 
+ "type": "boolean" + }, + "name": { + "type": "string" + } + } + }, + "shareProcessNamespace": { + "type": "boolean" + }, + "standalone": { + "type": "object", + "properties": { + "config": { + "type": [ + "string", + "object" + ] + }, + "enabled": { + "type": [ + "string", + "boolean" + ] + } + } + }, + "statefulSet": { + "type": "object", + "properties": { + "annotations": { + "type": [ + "object", + "string" + ] + }, + "securityContext": { + "type": "object", + "properties": { + "container": { + "type": [ + "object", + "string" + ] + }, + "pod": { + "type": [ + "object", + "string" + ] + } + } + } + } + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "tolerations": { + "type": [ + "null", + "array", + "string" + ] + }, + "topologySpreadConstraints": { + "type": [ + "null", + "array", + "string" + ] + }, + "updateStrategyType": { + "type": "string" + }, + "volumeMounts": { + "type": [ + "null", + "array" + ] + }, + "volumes": { + "type": [ + "null", + "array" + ] + }, + "hostNetwork": { + "type": "boolean" + } + } + }, + "ui": { + "type": "object", + "properties": { + "activeVaultPodOnly": { + "type": "boolean" + }, + "annotations": { + "type": [ + "object", + "string" + ] + }, + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "externalPort": { + "type": "integer" + }, + "externalTrafficPolicy": { + "type": "string" + }, + "publishNotReadyAddresses": { + "type": "boolean" + }, + "serviceNodePort": { + "type": [ + "null", + "integer" + ] + }, + "serviceType": { + "type": "string" + }, + "targetPort": { + "type": "integer" + } + } + } + } +} diff --git a/ansible/roles/helm_install/files/vault/values.yaml b/ansible/roles/helm_install/files/vault/values.yaml new file mode 100644 index 0000000..70779eb --- /dev/null +++ b/ansible/roles/helm_install/files/vault/values.yaml @@ -0,0 +1,1121 @@ +# Available parameters and their default values for the Vault chart. + +global: + # enabled is the master enabled switch. Setting this to true or false + # will enable or disable all the components within this chart by default. + enabled: true + + # Image pull secret to use for registry authentication. + # Alternatively, the value may be specified as an array of strings. + imagePullSecrets: [] + # imagePullSecrets: + # - name: image-pull-secret + + # TLS for end-to-end encrypted transport + tlsDisable: true + + # External vault server address for the injector and CSI provider to use. + # Setting this will disable deployment of a vault server. + externalVaultAddr: "" + + # If deploying to OpenShift + openshift: false + + # Create PodSecurityPolicy for pods + psp: + enable: false + # Annotation for PodSecurityPolicy. + # This is a multi-line templated string map, and can also be set as YAML. + annotations: | + seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default + apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default + seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default + apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default + + serverTelemetry: + # Enable integration with the Prometheus Operator + # See the top level serverTelemetry section below before enabling this feature. + prometheusOperator: false + +injector: + # True if you want to enable vault agent injection. 
+  # @default: global.enabled
+  enabled: "-"
+
+  replicas: 1
+
+  # Configures the port the injector should listen on
+  port: 8080
+
+  # If multiple replicas are specified, by default a leader will be determined
+  # so that only one injector attempts to create TLS certificates.
+  leaderElector:
+    enabled: true
+
+  # If true, will enable a node exporter metrics endpoint at /metrics.
+  metrics:
+    enabled: false
+
+  # Deprecated: Please use global.externalVaultAddr instead.
+  externalVaultAddr: ""
+
+  # image sets the repo and tag of the vault-k8s image to use for the injector.
+  image:
+    repository: "hashicorp/vault-k8s"
+    tag: "1.0.1"
+    pullPolicy: IfNotPresent
+
+  # agentImage sets the repo and tag of the Vault image to use for the Vault Agent
+  # containers. This should be set to the official Vault image. Vault 1.3.1+ is
+  # required.
+  agentImage:
+    repository: "hashicorp/vault"
+    tag: "1.12.0"
+
+  # The default values for the injected Vault Agent containers.
+  agentDefaults:
+    # For more information on configuring resources, see the K8s documentation:
+    # https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+    cpuLimit: "500m"
+    cpuRequest: "250m"
+    memLimit: "128Mi"
+    memRequest: "64Mi"
+
+    # Default template type for secrets when no custom template is specified.
+    # Possible values include: "json" and "map".
+    template: "map"
+
+    # Default values within Agent's template_config stanza.
+    templateConfig:
+      exitOnRetryFailure: true
+      staticSecretRenderInterval: ""
+
+  # Mount Path of the Vault Kubernetes Auth Method.
+  authPath: "auth/kubernetes"
+
+  # Configures the log verbosity of the injector.
+  # Supported log levels include: trace, debug, info, warn, error
+  logLevel: "info"
+
+  # Configures the log format of the injector. Supported log formats: "standard", "json".
+  logFormat: "standard"
+
+  # Configures all Vault Agent sidecars to revoke their token when shutting down.
+  revokeOnShutdown: false
+
+  webhook:
+    # Configures the failurePolicy of the webhook. The "unspecified" default behaviour depends on the
+    # API Version of the WebHook.
+    # To block pod creation while the webhook is unavailable, set the policy to `Fail` below.
+    # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy
+    #
+    failurePolicy: Ignore
+
+    # matchPolicy specifies the approach to accepting changes based on the rules of
+    # the MutatingWebhookConfiguration.
+    # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy
+    # for more details.
+    #
+    matchPolicy: Exact
+
+    # timeoutSeconds is the number of seconds before the webhook request times
+    # out. Whether a timed-out request is then ignored or rejected depends on
+    # the failurePolicy above.
+    # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#timeouts
+    # for more details.
+    #
+    timeoutSeconds: 30
+
+    # namespaceSelector is the selector for restricting the webhook to only
+    # specific namespaces.
+    # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector
+    # for more details.
+    # Example:
+    # namespaceSelector:
+    #   matchLabels:
+    #     sidecar-injector: enabled
+    namespaceSelector: {}
+
+    # objectSelector is the selector for restricting the webhook to only
+    # specific labels.
+ # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector + # for more details. + # Example: + # objectSelector: + # matchLabels: + # vault-sidecar-injector: enabled + objectSelector: | + matchExpressions: + - key: app.kubernetes.io/name + operator: NotIn + values: + - {{ template "vault.name" . }}-agent-injector + + # Extra annotations to attach to the webhook + annotations: {} + + # Deprecated: please use 'webhook.failurePolicy' instead + # Configures failurePolicy of the webhook. The "unspecified" default behaviour depends on the + # API Version of the WebHook. + # To block pod creation while webhook is unavailable, set the policy to `Fail` below. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy + # + failurePolicy: Ignore + + # Deprecated: please use 'webhook.namespaceSelector' instead + # namespaceSelector is the selector for restricting the webhook to only + # specific namespaces. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector + # for more details. + # Example: + # namespaceSelector: + # matchLabels: + # sidecar-injector: enabled + namespaceSelector: {} + + # Deprecated: please use 'webhook.objectSelector' instead + # objectSelector is the selector for restricting the webhook to only + # specific labels. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector + # for more details. + # Example: + # objectSelector: + # matchLabels: + # vault-sidecar-injector: enabled + objectSelector: {} + + # Deprecated: please use 'webhook.annotations' instead + # Extra annotations to attach to the webhook + webhookAnnotations: {} + + certs: + # secretName is the name of the secret that has the TLS certificate and + # private key to serve the injector webhook. If this is null, then the + # injector will default to its automatic management mode that will assign + # a service account to the injector to generate its own certificates. + secretName: null + + # caBundle is a base64-encoded PEM-encoded certificate bundle for the CA + # that signed the TLS certificate that the webhook serves. This must be set + # if secretName is non-null unless an external service like cert-manager is + # keeping the caBundle updated. + caBundle: "" + + # certName and keyName are the names of the files within the secret for + # the TLS cert and private key, respectively. These have reasonable + # defaults but can be customized if necessary. + certName: tls.crt + keyName: tls.key + + # Security context for the pod template and the injector container + # The default pod securityContext is: + # runAsNonRoot: true + # runAsGroup: {{ .Values.injector.gid | default 1000 }} + # runAsUser: {{ .Values.injector.uid | default 100 }} + # fsGroup: {{ .Values.injector.gid | default 1000 }} + # and for container is + # allowPrivilegeEscalation: false + # capabilities: + # drop: + # - ALL + securityContext: + pod: {} + container: {} + + resources: {} + # resources: + # requests: + # memory: 256Mi + # cpu: 250m + # limits: + # memory: 256Mi + # cpu: 250m + + # extraEnvironmentVars is a list of extra environment variables to set in the + # injector deployment. 
+ extraEnvironmentVars: {} + # KUBERNETES_SERVICE_HOST: kubernetes.default.svc + + # Affinity Settings for injector pods + # This can either be a multi-line string or YAML matching the PodSpec's affinity field. + # Commenting out or setting as empty the affinity variable, will allow + # deployment of multiple replicas to single node services such as Minikube. + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/name: {{ template "vault.name" . }}-agent-injector + app.kubernetes.io/instance: "{{ .Release.Name }}" + component: webhook + topologyKey: kubernetes.io/hostname + + # Topology settings for injector pods + # ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + # This should be either a multi-line string or YAML matching the topologySpreadConstraints array + # in a PodSpec. + topologySpreadConstraints: [] + + # Toleration Settings for injector pods + # This should be either a multi-line string or YAML matching the Toleration array + # in a PodSpec. + tolerations: [] + + # nodeSelector labels for server pod assignment, formatted as a multi-line string or YAML map. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: + # beta.kubernetes.io/arch: amd64 + nodeSelector: {} + + # Priority class for injector pods + priorityClassName: "" + + # Extra annotations to attach to the injector pods + # This can either be YAML or a YAML-formatted multi-line templated string map + # of the annotations to apply to the injector pods + annotations: {} + + # Extra labels to attach to the agent-injector + # This should be a YAML map of the labels to apply to the injector + extraLabels: {} + + # Should the injector pods run on the host network (useful when using + # an alternate CNI in EKS) + hostNetwork: false + + # Injector service specific config + service: + # Extra annotations to attach to the injector service + annotations: {} + + # Injector serviceAccount specific config + serviceAccount: + # Extra annotations to attach to the injector serviceAccount + annotations: {} + + # A disruption budget limits the number of pods of a replicated application + # that are down simultaneously from voluntary disruptions + podDisruptionBudget: {} + # podDisruptionBudget: + # maxUnavailable: 1 + + # strategy for updating the deployment. This can be a multi-line string or a + # YAML map. + strategy: {} + # strategy: | + # rollingUpdate: + # maxSurge: 25% + # maxUnavailable: 25% + # type: RollingUpdate + +server: + # If true, or "-" with global.enabled true, Vault server will be installed. + # See vault.mode in _helpers.tpl for implementation details. + enabled: "-" + + # [Enterprise Only] This value refers to a Kubernetes secret that you have + # created that contains your enterprise license. If you are not using an + # enterprise image or if you plan to introduce the license key via another + # route, then leave secretName blank ("") or set it to null. + # Requires Vault Enterprise 1.8 or later. + enterpriseLicense: + # The name of the Kubernetes secret that holds the enterprise license. The + # secret must be in the same namespace that Vault is installed into. + secretName: "" + # The key within the Kubernetes secret that holds the enterprise license. + secretKey: "license" + + # Resource requests, limits, etc. for the server cluster placement. This + # should map directly to the value of the resources field for a PodSpec. 
+ # By default no direct resource request is made. + + image: + repository: "hashicorp/vault" + tag: "1.12.0" + # Overrides the default Image Pull Policy + pullPolicy: IfNotPresent + + # Configure the Update Strategy Type for the StatefulSet + # See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + updateStrategyType: "OnDelete" + + # Configure the logging verbosity for the Vault server. + # Supported log levels include: trace, debug, info, warn, error + logLevel: "" + + # Configure the logging format for the Vault server. + # Supported log formats include: standard, json + logFormat: "" + + resources: {} + # resources: + # requests: + # memory: 256Mi + # cpu: 250m + # limits: + # memory: 256Mi + # cpu: 250m + + # Ingress allows ingress services to be created to allow external access + # from Kubernetes to access Vault pods. + # If deployment is on OpenShift, the following block is ignored. + # In order to expose the service, use the route section below + ingress: + enabled: false + labels: {} + # traffic: external + annotations: {} + # | + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # or + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + + # Optionally use ingressClassName instead of deprecated annotation. + # See: https://kubernetes.io/docs/concepts/services-networking/ingress/#deprecated-annotation + ingressClassName: "" + + # As of Kubernetes 1.19, all Ingress Paths must have a pathType configured. The default value below should be sufficient in most cases. + # See: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types for other possible values. + pathType: Prefix + + # When HA mode is enabled and K8s service registration is being used, + # configure the ingress to point to the Vault active service. + activeService: true + hosts: + - host: chart-example.local + paths: [] + ## Extra paths to prepend to the host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # service: + # name: ssl-redirect + # port: + # number: use-annotation + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + # OpenShift only - create a route to expose the service + # By default the created route will be of type passthrough + route: + enabled: false + + # When HA mode is enabled and K8s service registration is being used, + # configure the route to point to the Vault active service. + activeService: true + + labels: {} + annotations: {} + host: chart-example.local + # tls will be passed directly to the route's TLS config, which + # can be used to configure other termination methods that terminate + # TLS at the router + tls: + termination: passthrough + + # authDelegator enables a cluster role binding to be attached to the service + # account. This cluster role binding can be used to setup Kubernetes auth + # method. https://www.vaultproject.io/docs/auth/kubernetes.html + authDelegator: + enabled: true + + # extraInitContainers is a list of init containers. Specified as a YAML list. + # This is useful if you need to run a script to provision TLS certificates or + # write out configuration files in a dynamic way. + extraInitContainers: null + # # This example installs a plugin pulled from github into the /usr/local/libexec/vault/oauthapp folder, + # # which is defined in the volumes value. 
+  # - name: oauthapp
+  #   image: "alpine"
+  #   command: [sh, -c]
+  #   args:
+  #     - cd /tmp &&
+  #       wget https://github.com/puppetlabs/vault-plugin-secrets-oauthapp/releases/download/v1.2.0/vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64.tar.xz -O oauthapp.xz &&
+  #       tar -xf oauthapp.xz &&
+  #       mv vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64 /usr/local/libexec/vault/oauthapp &&
+  #       chmod +x /usr/local/libexec/vault/oauthapp
+  #   volumeMounts:
+  #     - name: plugins
+  #       mountPath: /usr/local/libexec/vault
+
+  # extraContainers is a list of sidecar containers. Specified as a YAML list.
+  extraContainers: null
+
+  # shareProcessNamespace enables process namespace sharing between Vault and the extraContainers
+  # This is useful if Vault must be signaled, e.g. to send a SIGHUP for a log rotation
+  shareProcessNamespace: false
+
+  # extraArgs is a string containing additional Vault server arguments.
+  extraArgs: ""
+
+  # Used to define custom readinessProbe settings
+  readinessProbe:
+    enabled: true
+    # If you need to use an http path instead of the default exec
+    # path: /v1/sys/health?standbyok=true
+
+    # When a probe fails, Kubernetes will try failureThreshold times before giving up
+    failureThreshold: 2
+    # Number of seconds after the container has started before probe initiates
+    initialDelaySeconds: 5
+    # How often (in seconds) to perform the probe
+    periodSeconds: 5
+    # Minimum consecutive successes for the probe to be considered successful after having failed
+    successThreshold: 1
+    # Number of seconds after which the probe times out.
+    timeoutSeconds: 3
+  # Used to enable a livenessProbe for the pods
+  livenessProbe:
+    enabled: false
+    path: "/v1/sys/health?standbyok=true"
+    # When a probe fails, Kubernetes will try failureThreshold times before giving up
+    failureThreshold: 2
+    # Number of seconds after the container has started before probe initiates
+    initialDelaySeconds: 60
+    # How often (in seconds) to perform the probe
+    periodSeconds: 5
+    # Minimum consecutive successes for the probe to be considered successful after having failed
+    successThreshold: 1
+    # Number of seconds after which the probe times out.
+    timeoutSeconds: 3
+
+  # Optional duration in seconds the pod needs to terminate gracefully.
+  # See: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/
+  terminationGracePeriodSeconds: 10
+
+  # Used to set the sleep time during the preStop step
+  preStopSleepSeconds: 5
+
+  # Used to define commands to run after the pod is ready.
+  # This can be used to automate processes such as initialization
+  # or bootstrapping auth methods.
+  postStart: []
+  # - /bin/sh
+  # - -c
+  # - /vault/userconfig/myscript/run.sh
+
+  # extraEnvironmentVars is a list of extra environment variables to set with the stateful set. These could be
+  # used to include variables required for auto-unseal.
+  extraEnvironmentVars: {}
+  # GOOGLE_REGION: global
+  # GOOGLE_PROJECT: myproject
+  # GOOGLE_APPLICATION_CREDENTIALS: /vault/userconfig/myproject/myproject-creds.json
+
+  # extraSecretEnvironmentVars is a list of extra environment variables to set with the stateful set.
+  # These variables take value from existing Secret objects.
+  extraSecretEnvironmentVars: []
+  # - envName: AWS_SECRET_ACCESS_KEY
+  #   secretName: vault
+  #   secretKey: AWS_SECRET_ACCESS_KEY
+
+  # Deprecated: please use 'volumes' instead.
+  # extraVolumes is a list of extra volumes to mount. These will be exposed
+  # to Vault in the path `/vault/userconfig/<name>/`. The value below is
+  # an array of objects, examples are shown below.
+ extraVolumes: [] + # - type: secret (or "configMap") + # name: my-secret + # path: null # default is `/vault/userconfig` + + # volumes is a list of volumes made available to all containers. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. + volumes: null + # - name: plugins + # emptyDir: {} + + # volumeMounts is a list of volumeMounts for the main server container. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. + volumeMounts: null + # - mountPath: /usr/local/libexec/vault + # name: plugins + # readOnly: true + + # Affinity Settings + # Commenting out or setting as empty the affinity variable, will allow + # deployment to single node services such as Minikube + # This should be either a multi-line string or YAML matching the PodSpec's affinity field. + # affinity: | + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchLabels: + # app.kubernetes.io/name: {{ template "vault.name" . }} + # app.kubernetes.io/instance: "{{ .Release.Name }}" + # component: server + # topologyKey: kubernetes.io/hostname + + # Topology settings for server pods + # ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + # This should be either a multi-line string or YAML matching the topologySpreadConstraints array + # in a PodSpec. + topologySpreadConstraints: [] + + # Toleration Settings for server pods + # This should be either a multi-line string or YAML matching the Toleration array + # in a PodSpec. + tolerations: [] + + # nodeSelector labels for server pod assignment, formatted as a multi-line string or YAML map. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: + # beta.kubernetes.io/arch: amd64 + nodeSelector: {} + + # Enables network policy for server pods + networkPolicy: + enabled: false + egress: [] + # egress: + # - to: + # - ipBlock: + # cidr: 10.0.0.0/24 + # ports: + # - protocol: TCP + # port: 443 + + # Priority class for server pods + priorityClassName: "" + + # Extra labels to attach to the server pods + # This should be a YAML map of the labels to apply to the server pods + extraLabels: {} + + # Extra annotations to attach to the server pods + # This can either be YAML or a YAML-formatted multi-line templated string map + # of the annotations to apply to the server pods + annotations: {} + + # Enables a headless service to be used by the Vault Statefulset + service: + enabled: true + # clusterIP controls whether a Cluster IP address is attached to the + # Vault service within Kubernetes. By default, the Vault service will + # be given a Cluster IP address, set to None to disable. When disabled + # Kubernetes will create a "headless" service. Headless services can be + # used to communicate with pods directly through DNS instead of a round-robin + # load balancer. + # clusterIP: None + + # Configures the service type for the main Vault service. Can be ClusterIP + # or NodePort. + #type: ClusterIP + + # Do not wait for pods to be ready + publishNotReadyAddresses: true + + # The externalTrafficPolicy can be set to either Cluster or Local + # and is only valid for LoadBalancer and NodePort service types. + # The default value is Cluster. 
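+
+    # As noted for clusterIP above, individual server pods are addressable
+    # through the headless service DNS; a sketch (release name "vault" and
+    # namespace "dsk-middle" are assumptions, not chart defaults):
+    #   vault-0.vault-internal.dsk-middle.svc:8200
+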
+    # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy
+    externalTrafficPolicy: Cluster
+
+    # If type is set to "NodePort", a specific nodePort value can be configured,
+    # will be random if left blank.
+    #nodePort: 30000
+
+    # When HA mode is enabled
+    # If type is set to "NodePort", a specific nodePort value can be configured,
+    # will be random if left blank.
+    #activeNodePort: 30001
+
+    # When HA mode is enabled
+    # If type is set to "NodePort", a specific nodePort value can be configured,
+    # will be random if left blank.
+    #standbyNodePort: 30002
+
+    # Port on which Vault server is listening
+    port: 8200
+    # Target port to which the service should be mapped
+    targetPort: 8200
+    # Extra annotations for the service definition. This can either be YAML or a
+    # YAML-formatted multi-line templated string map of the annotations to apply
+    # to the service.
+    annotations: {}
+
+  # This configures the Vault Statefulset to create a PVC for data
+  # storage when using the file or raft backend storage engines.
+  # See https://www.vaultproject.io/docs/configuration/storage/index.html for more information.
+  dataStorage:
+    enabled: true
+    # Size of the PVC created
+    size: 10Gi
+    # Location where the PVC will be mounted.
+    mountPath: "/vault/data"
+    # Name of the storage class to use. If null it will use the
+    # configured default Storage Class.
+    storageClass: null
+    # Access Mode of the storage device being used for the PVC
+    accessMode: ReadWriteOnce
+    # Annotations to apply to the PVC
+    annotations: {}
+
+  # This configures the Vault Statefulset to create a PVC for audit
+  # logs. Once Vault is deployed, initialized, and unsealed, Vault must
+  # be configured to use this for audit logs. This will be mounted to
+  # /vault/audit
+  # See https://www.vaultproject.io/docs/audit/index.html for more information.
+  auditStorage:
+    enabled: false
+    # Size of the PVC created
+    size: 10Gi
+    # Location where the PVC will be mounted.
+    mountPath: "/vault/audit"
+    # Name of the storage class to use. If null it will use the
+    # configured default Storage Class.
+    storageClass: null
+    # Access Mode of the storage device being used for the PVC
+    accessMode: ReadWriteOnce
+    # Annotations to apply to the PVC
+    annotations: {}
+
+  # Run Vault in "dev" mode. This requires no further setup, no state management,
+  # and no initialization. This is useful for experimenting with Vault without
+  # needing to unseal, store keys, and so on. All data is lost on restart - do not
+  # use dev mode for anything other than experimenting.
+  # See https://www.vaultproject.io/docs/concepts/dev-server.html for more information.
+  dev:
+    enabled: false
+
+    # Set VAULT_DEV_ROOT_TOKEN_ID value
+    devRootToken: "root"
+
+  # Run Vault in "standalone" mode. This is the default mode that will deploy if
+  # no arguments are given to helm. This requires a PVC for data storage to use
+  # the "file" backend. This mode is not highly available and should not be scaled
+  # past a single replica.
+  standalone:
+    enabled: "-"
+
+    # config is a raw string of default configuration when using a Stateful
+    # deployment. Default is to use a PersistentVolumeClaim mounted at /vault/data
+    # and store data there. This is only used when using a Replica count of 1, and
+    # using a stateful set. This should be HCL.
+
+    # Note: Configuration files are stored in ConfigMaps so sensitive data
+    # such as passwords should be either mounted through extraSecretEnvironmentVars
+    # or through a Kube secret. For more information see:
+    # https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
+    config: |
+      ui = true
+
+      listener "tcp" {
+        tls_disable = 1
+        address = "[::]:8200"
+        cluster_address = "[::]:8201"
+        # Enable unauthenticated metrics access (necessary for Prometheus Operator)
+        #telemetry {
+        #  unauthenticated_metrics_access = "true"
+        #}
+      }
+      storage "file" {
+        path = "/vault/data"
+      }
+
+      # Example configuration for using auto-unseal, using Google Cloud KMS. The
+      # GKMS keys must already exist, and the cluster must have a service account
+      # that is authorized to access GCP KMS.
+      #seal "gcpckms" {
+      #  project = "vault-helm-dev"
+      #  region = "global"
+      #  key_ring = "vault-helm-unseal-kr"
+      #  crypto_key = "vault-helm-unseal-key"
+      #}
+
+      # Example configuration for enabling Prometheus metrics in your config.
+      #telemetry {
+      #  prometheus_retention_time = "30s",
+      #  disable_hostname = true
+      #}
+
+  # Run Vault in "HA" mode. There are no storage requirements unless the audit log
+  # persistence is required. In HA mode Vault will configure itself to use Consul
+  # for its storage backend. The default configuration provided will work with the
+  # Consul Helm project by default. It is possible to manually configure Vault to
+  # use a different HA backend.
+  ha:
+    enabled: false
+    replicas: 3
+
+    # Set the api_addr configuration for Vault HA
+    # See https://www.vaultproject.io/docs/configuration#api_addr
+    # If set to null, this will be set to the Pod IP Address
+    apiAddr: null
+
+    # Set the cluster_addr configuration for Vault HA
+    # See https://www.vaultproject.io/docs/configuration#cluster_addr
+    # If set to null, this will be set to https://$(HOSTNAME).{{ template "vault.fullname" . }}-internal:8201
+    clusterAddr: null
+
+    # Enables Vault's integrated Raft storage. Unlike the typical HA modes where
+    # Vault's persistence is external (such as Consul), enabling Raft mode will create
+    # persistent volumes for Vault to store data according to the configuration under server.dataStorage.
+    # The Vault cluster will coordinate leader elections and failovers internally.
+    raft:
+
+      # Enables Raft integrated storage
+      enabled: false
+      # Set the Node Raft ID to the name of the pod
+      setNodeId: false
+
+      # Note: Configuration files are stored in ConfigMaps so sensitive data
+      # such as passwords should be either mounted through extraSecretEnvironmentVars
+      # or through a Kube secret. For more information see:
+      # https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
+      config: |
+        ui = true
+
+        listener "tcp" {
+          tls_disable = 1
+          address = "[::]:8200"
+          cluster_address = "[::]:8201"
+          # Enable unauthenticated metrics access (necessary for Prometheus Operator)
+          #telemetry {
+          #  unauthenticated_metrics_access = "true"
+          #}
+        }
+
+        storage "raft" {
+          path = "/vault/data"
+        }
+
+        service_registration "kubernetes" {}
+
+    # config is a raw string of default configuration when using a Stateful
+    # deployment. Default is to use Consul for its HA storage backend.
+    # This should be HCL.
+
+    # Note: Configuration files are stored in ConfigMaps so sensitive data
+    # such as passwords should be either mounted through extraSecretEnvironmentVars
+    # or through a Kube secret. For more information see:
+    # https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
+    config: |
+      ui = true
+
+      listener "tcp" {
+        tls_disable = 1
+        address = "[::]:8200"
+        cluster_address = "[::]:8201"
+      }
+      storage "consul" {
+        path = "vault"
+        address = "HOST_IP:8500"
+      }
+
+      service_registration "kubernetes" {}
+
+      # Example configuration for using auto-unseal, using Google Cloud KMS. The
+      # GKMS keys must already exist, and the cluster must have a service account
+      # that is authorized to access GCP KMS.
+      #seal "gcpckms" {
+      #  project = "vault-helm-dev-246514"
+      #  region = "global"
+      #  key_ring = "vault-helm-unseal-kr"
+      #  crypto_key = "vault-helm-unseal-key"
+      #}
+
+      # Example configuration for enabling Prometheus metrics.
+      # If you are using Prometheus Operator you can enable a ServiceMonitor resource below.
+      # You may wish to enable unauthenticated metrics in the listener block above.
+      #telemetry {
+      #  prometheus_retention_time = "30s",
+      #  disable_hostname = true
+      #}
+
+    # A disruption budget limits the number of pods of a replicated application
+    # that are down simultaneously from voluntary disruptions
+    disruptionBudget:
+      enabled: true
+
+      # maxUnavailable will default to (n/2)-1 where n is the number of
+      # replicas. If you'd like a custom value, you can specify an override here.
+      maxUnavailable: null
+
+  # Definition of the serviceAccount used to run Vault.
+  # These options are also used when using an external Vault server to validate
+  # Kubernetes tokens.
+  serviceAccount:
+    # Specifies whether a service account should be created
+    create: true
+    # The name of the service account to use.
+    # If not set and create is true, a name is generated using the fullname template
+    name: ""
+    # Extra annotations for the serviceAccount definition. This can either be
+    # YAML or a YAML-formatted multi-line templated string map of the
+    # annotations to apply to the serviceAccount.
+    annotations: {}
+
+  # Settings for the statefulSet used to run Vault.
+  statefulSet:
+    # Extra annotations for the statefulSet. This can either be YAML or a
+    # YAML-formatted multi-line templated string map of the annotations to apply
+    # to the statefulSet.
+    annotations: {}
+
+    # Set the pod and container security contexts.
+    # If not set, these will default to, and for *not* OpenShift:
+    # pod:
+    #   runAsNonRoot: true
+    #   runAsGroup: {{ .Values.server.gid | default 1000 }}
+    #   runAsUser: {{ .Values.server.uid | default 100 }}
+    #   fsGroup: {{ .Values.server.gid | default 1000 }}
+    # container:
+    #   allowPrivilegeEscalation: false
+    #
+    # If not set, these will default to, and for OpenShift:
+    # pod: {}
+    # container: {}
+    securityContext:
+      pod: {}
+      container: {}
+
+  # Should the server pods run on the host network
+  hostNetwork: false
+
+# Vault UI
+ui:
+  # True if you want to create a Service entry for the Vault UI.
+  #
+  # serviceType can be used to control the type of service created. For
+  # example, setting this to "LoadBalancer" will create an external load
+  # balancer (for supported K8S installations) to access the UI.
+  enabled: false
+  publishNotReadyAddresses: true
+  # The service should only contain selectors for active Vault pod
+  activeVaultPodOnly: false
+  serviceType: "ClusterIP"
+  serviceNodePort: null
+  externalPort: 8200
+  targetPort: 8200
+
+  # The externalTrafficPolicy can be set to either Cluster or Local
+  # and is only valid for LoadBalancer and NodePort service types.
+  # The default value is Cluster.
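+
+  # Elsewhere in this role, charts are installed with an override-values.yaml
+  # (see helm-chart-install.yml); if vault is driven the same way, a minimal
+  # override sketch could combine the server and ui toggles above
+  # (illustrative values, not chart defaults):
+  #   server:
+  #     ha:
+  #       enabled: true
+  #       raft:
+  #         enabled: true
+  #   ui:
+  #     enabled: true
+  #     serviceType: "LoadBalancer"
+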
+ # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy + externalTrafficPolicy: Cluster + + #loadBalancerSourceRanges: + # - 10.0.0.0/16 + # - 1.78.23.3/32 + + # loadBalancerIP: + + # Extra annotations to attach to the ui service + # This can either be YAML or a YAML-formatted multi-line templated string map + # of the annotations to apply to the ui service + annotations: {} + +# secrets-store-csi-driver-provider-vault +csi: + # True if you want to install a secrets-store-csi-driver-provider-vault daemonset. + # + # Requires installing the secrets-store-csi-driver separately, see: + # https://github.com/kubernetes-sigs/secrets-store-csi-driver#install-the-secrets-store-csi-driver + # + # With the driver and provider installed, you can mount Vault secrets into volumes + # similar to the Vault Agent injector, and you can also sync those secrets into + # Kubernetes secrets. + enabled: false + + image: + repository: "hashicorp/vault-csi-provider" + tag: "1.2.0" + pullPolicy: IfNotPresent + + # volumes is a list of volumes made available to all containers. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. + volumes: null + # - name: tls + # secret: + # secretName: vault-tls + + # volumeMounts is a list of volumeMounts for the main server container. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. + volumeMounts: null + # - name: tls + # mountPath: "/vault/tls" + # readOnly: true + + resources: {} + # resources: + # requests: + # cpu: 50m + # memory: 128Mi + # limits: + # cpu: 50m + # memory: 128Mi + + # Settings for the daemonSet used to run the provider. + daemonSet: + updateStrategy: + type: RollingUpdate + maxUnavailable: "" + # Extra annotations for the daemonSet. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the daemonSet. + annotations: {} + # Provider host path (must match the CSI provider's path) + providersDir: "/etc/kubernetes/secrets-store-csi-providers" + # Kubelet host path + kubeletRootDir: "/var/lib/kubelet" + # Extra labels to attach to the vault-csi-provider daemonSet + # This should be a YAML map of the labels to apply to the csi provider daemonSet + extraLabels: {} + # security context for the pod template and container in the csi provider daemonSet + securityContext: + pod: {} + container: {} + + pod: + # Extra annotations for the provider pods. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the pod. + annotations: {} + + # Toleration Settings for provider pods + # This should be either a multi-line string or YAML matching the Toleration array + # in a PodSpec. + tolerations: [] + + # Extra labels to attach to the vault-csi-provider pod + # This should be a YAML map of the labels to apply to the csi provider pod + extraLabels: {} + + + + # Priority class for csi pods + priorityClassName: "" + + serviceAccount: + # Extra annotations for the serviceAccount definition. This can either be + # YAML or a YAML-formatted multi-line templated string map of the + # annotations to apply to the serviceAccount. 
+    annotations: {}
+
+    # Extra labels to attach to the vault-csi-provider serviceAccount
+    # This should be a YAML map of the labels to apply to the csi provider serviceAccount
+    extraLabels: {}
+
+  # Used to configure readinessProbe for the pods.
+  readinessProbe:
+    # When a probe fails, Kubernetes will try failureThreshold times before giving up
+    failureThreshold: 2
+    # Number of seconds after the container has started before probe initiates
+    initialDelaySeconds: 5
+    # How often (in seconds) to perform the probe
+    periodSeconds: 5
+    # Minimum consecutive successes for the probe to be considered successful after having failed
+    successThreshold: 1
+    # Number of seconds after which the probe times out.
+    timeoutSeconds: 3
+  # Used to configure livenessProbe for the pods.
+  livenessProbe:
+    # When a probe fails, Kubernetes will try failureThreshold times before giving up
+    failureThreshold: 2
+    # Number of seconds after the container has started before probe initiates
+    initialDelaySeconds: 5
+    # How often (in seconds) to perform the probe
+    periodSeconds: 5
+    # Minimum consecutive successes for the probe to be considered successful after having failed
+    successThreshold: 1
+    # Number of seconds after which the probe times out.
+    timeoutSeconds: 3
+
+  # Enables debug logging.
+  debug: false
+
+  # Pass arbitrary additional arguments to vault-csi-provider.
+  # See https://www.vaultproject.io/docs/platform/k8s/csi/configurations#command-line-arguments
+  # for the available command line flags.
+  extraArgs: []
+
+# Vault is able to collect and publish various runtime metrics.
+# Enabling this feature requires adding a `telemetry{}` stanza to
+# the Vault configuration. There are a few examples included in the `config` sections above.
+#
+# For more information see:
+# https://www.vaultproject.io/docs/configuration/telemetry
+# https://www.vaultproject.io/docs/internals/telemetry
+serverTelemetry:
+  # Enable support for the Prometheus Operator. Currently, this chart does not support
+  # authenticating to Vault's metrics endpoint, so the following `telemetry{}` must be included
+  # in the `listener "tcp"{}` stanza
+  #  telemetry {
+  #    unauthenticated_metrics_access = "true"
+  #  }
+  #
+  # See the `standalone.config` for a more complete example of this.
+  #
+  # In addition, a top level `telemetry{}` stanza must also be included in the Vault configuration:
+  #
+  # example:
+  #  telemetry {
+  #    prometheus_retention_time = "30s",
+  #    disable_hostname = true
+  #  }
+  #
+  # Configuration for monitoring the Vault server.
+  serviceMonitor:
+    # The Prometheus operator *must* be installed before enabling this feature,
+    # if not, the chart will fail to install due to missing CustomResourceDefinitions
+    # provided by the operator.
+    #
+    # Instructions on how to install the Helm chart can be found here:
+    #  https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack
+    # More information can be found here:
+    #  https://github.com/prometheus-operator/prometheus-operator
+    #  https://github.com/prometheus-operator/kube-prometheus
+
+    # Enable deployment of the Vault Server ServiceMonitor CustomResource.
+    enabled: false
+
+    # Selector labels to add to the ServiceMonitor.
+ # When empty, defaults to: + # release: prometheus + selectors: {} + + # Interval at which Prometheus scrapes metrics + interval: 30s + + # Timeout for Prometheus scrapes + scrapeTimeout: 10s + + prometheusRules: + # The Prometheus operator *must* be installed before enabling this feature, + # if not the chart will fail to install due to missing CustomResourceDefinitions + # provided by the operator. + + # Deploy the PrometheusRule custom resource for AlertManager based alerts. + # Requires that AlertManager is properly deployed. + enabled: false + + # Selector labels to add to the PrometheusRules. + # When empty, defaults to: + # release: prometheus + selectors: {} + + # Some example rules. + rules: {} + # - alert: vault-HighResponseTime + # annotations: + # message: The response time of Vault is over 500ms on average over the last 5 minutes. + # expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 500 + # for: 5m + # labels: + # severity: warning + # - alert: vault-HighResponseTime + # annotations: + # message: The response time of Vault is over 1s on average over the last 5 minutes. + # expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 1000 + # for: 5m + # labels: + # severity: critical diff --git a/ansible/roles/helm_install/files/vault_agent/configmap.yaml b/ansible/roles/helm_install/files/vault_agent/configmap.yaml new file mode 100644 index 0000000..5864097 --- /dev/null +++ b/ansible/roles/helm_install/files/vault_agent/configmap.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: dsk-vault-agent-config +data: + server.tmpl: | + {{ with secret "tls/data/server" }}{{ toJSON .Data.data }} + {{ end }} + + client.tmpl: | + {{ with secret "tls/data/client" }}{{ toJSON .Data.data }} + {{ end }} + + agent.hcl: | + pid_file = "./pidfile" + + vault { + address="http://vault-ui.dsk-middle:8200" + } + + auto_auth { + method { + type = "approle" + config = { + role_id_file_path = "/vault-agent/role-id" + secret_id_file_path = "/vault-agent/secret-id" + remove_secret_id_file_after_reading = false + } + } + + sink { + type = "file" + config = { + path = "/vault-agent/.vault-token" + mode = 0644 + } + } + } + + template_config { + static_secret_render_interval = "10s" + } + + template { + source = "/vault-agent/conf/server.tmpl" + destination = "/vault-agent/serverTls" + } + + template { + source = "/vault-agent/conf/client.tmpl" + destination = "/vault-agent/clientTls" + } diff --git a/ansible/roles/helm_install/files/vault_agent/deployment.yaml b/ansible/roles/helm_install/files/vault_agent/deployment.yaml new file mode 100644 index 0000000..7860a29 --- /dev/null +++ b/ansible/roles/helm_install/files/vault_agent/deployment.yaml @@ -0,0 +1,42 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dsk-vault-agent + labels: + app: dsk-vault-agent +spec: + replicas: 1 + selector: + matchLabels: + app: dsk-vault-agent + template: + metadata: + labels: + app: dsk-vault-agent + spec: + containers: + - name: vault-agent + image: vault + volumeMounts: + - name: vault-volume + mountPath: /vault-agent + - name: config + mountPath: /vault-agent/conf + command: [ "vault" ] + args: [ "agent", "-config=/vault-agent/conf/agent.hcl" ] + volumes: + - name: vault-volume + persistentVolumeClaim: + claimName: dsk-vault-tls-pvc + - name: config + configMap: + name: dsk-vault-agent-config + items: + - key: agent.hcl + path: agent.hcl + - key: server.tmpl + path: server.tmpl + - key: client.tmpl + path: client.tmpl + + diff --git 
a/ansible/roles/helm_install/files/vault_agent/pvc.yaml b/ansible/roles/helm_install/files/vault_agent/pvc.yaml new file mode 100644 index 0000000..712472e --- /dev/null +++ b/ansible/roles/helm_install/files/vault_agent/pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: dsk-vault-tls-pvc +spec: + storageClassName: nfs-provisioner-dev + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi diff --git a/ansible/roles/helm_install/files/vault_agent/test/configmap.yaml b/ansible/roles/helm_install/files/vault_agent/test/configmap.yaml new file mode 100644 index 0000000..5864097 --- /dev/null +++ b/ansible/roles/helm_install/files/vault_agent/test/configmap.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: dsk-vault-agent-config +data: + server.tmpl: | + {{ with secret "tls/data/server" }}{{ toJSON .Data.data }} + {{ end }} + + client.tmpl: | + {{ with secret "tls/data/client" }}{{ toJSON .Data.data }} + {{ end }} + + agent.hcl: | + pid_file = "./pidfile" + + vault { + address="http://vault-ui.dsk-middle:8200" + } + + auto_auth { + method { + type = "approle" + config = { + role_id_file_path = "/vault-agent/role-id" + secret_id_file_path = "/vault-agent/secret-id" + remove_secret_id_file_after_reading = false + } + } + + sink { + type = "file" + config = { + path = "/vault-agent/.vault-token" + mode = 0644 + } + } + } + + template_config { + static_secret_render_interval = "10s" + } + + template { + source = "/vault-agent/conf/server.tmpl" + destination = "/vault-agent/serverTls" + } + + template { + source = "/vault-agent/conf/client.tmpl" + destination = "/vault-agent/clientTls" + } diff --git a/ansible/roles/helm_install/files/vault_agent/test/deployment.yaml b/ansible/roles/helm_install/files/vault_agent/test/deployment.yaml new file mode 100644 index 0000000..650af00 --- /dev/null +++ b/ansible/roles/helm_install/files/vault_agent/test/deployment.yaml @@ -0,0 +1,42 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dsk-vault-agent + labels: + app: dsk-vault-agent +spec: + replicas: 1 + selector: + matchLabels: + app: dsk-vault-agent + template: + metadata: + labels: + app: dsk-vault-agent + spec: + containers: + - name: vault-agent + image: vault + volumeMounts: + - name: vault-volume + mountPath: /vault-agent + - name: config + mountPath: /vault-agent/conf + command: [ "vault" ] + args: [ "agent", "-config=/vault-agent/conf/agent.hcl" ] + volumes: + - name: vault-volume + persistentVolumeClaim: + claimName: dsk-vault-test + - name: config + configMap: + name: dsk-vault-agent-config + items: + - key: agent.hcl + path: agent.hcl + - key: server.tmpl + path: server.tmpl + - key: client.tmpl + path: client.tmpl + + diff --git a/ansible/roles/helm_install/files/vault_agent/test/pvc.yaml b/ansible/roles/helm_install/files/vault_agent/test/pvc.yaml new file mode 100644 index 0000000..612ed9a --- /dev/null +++ b/ansible/roles/helm_install/files/vault_agent/test/pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: dsk-vault-test +spec: + storageClassName: nfs-client-test + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi diff --git a/ansible/roles/helm_install/tasks/helm-chart-install.yml b/ansible/roles/helm_install/tasks/helm-chart-install.yml new file mode 100644 index 0000000..9d0ec9f --- /dev/null +++ b/ansible/roles/helm_install/tasks/helm-chart-install.yml @@ -0,0 +1,37 @@ +--- +- name: 1. 
helmchart install (default) + kubernetes.core.helm: + kubeconfig: "{{ role_path }}/files/kubeconfig" + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ kubernetes_middleware_namespace }}" + chart_ref: "{{ role_path }}/files/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/{{item}}/values.yaml" + with_items: + - kafka + +- name: 2. helmchart install (override-values) + kubernetes.core.helm: + kubeconfig: "{{ role_path }}/files/kubeconfig" + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ kubernetes_middleware_namespace }}" + chart_ref: "{{ role_path }}/files/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/{{item}}/override-values.yaml" + with_items: + - ingress-nginx + - druid + - elasticsearch + - keycloak + - mongo-dsk + - mongo-manifest + - postgresql + - rabbitmq + - redis + diff --git a/ansible/roles/helm_install/tasks/helm-chart-install.yml_bak b/ansible/roles/helm_install/tasks/helm-chart-install.yml_bak new file mode 100644 index 0000000..57bc4ee --- /dev/null +++ b/ansible/roles/helm_install/tasks/helm-chart-install.yml_bak @@ -0,0 +1,23 @@ +--- +- name: helmchart install + kubernetes.core.helm: + kubeconfig: "{{ role_path }}/files/kubeconfig" + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ kubernetes_middleware_namespace }}" + chart_ref: "{{ role_path }}/files/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/{{item}}/override-values.yaml" + with_items: + - ingress-nginx + - druid + - elasticsearch + - kafka + - keycloak + - mongo-dsk + - mongo-manifest + - postgresql + - rabbitmq + - redis diff --git a/ansible/roles/helm_install/tasks/helm-chart-nginx.yml b/ansible/roles/helm_install/tasks/helm-chart-nginx.yml new file mode 100644 index 0000000..748604a --- /dev/null +++ b/ansible/roles/helm_install/tasks/helm-chart-nginx.yml @@ -0,0 +1,23 @@ +--- +- name: Create Nginx Ingress Controller deployment + kubernetes.core.helm: + kubeconfig: "{{ role_path }}/files/kubeconfig" + name: "{{item}}" + release_name: "{{item}}" + release_namespace: + chart_ref: "{{ role_path }}/files/{{item}}" + create_namespace: yes + release_state: present + with_items: + - ingress-nginx + - druid + - elasticsearch + - kafka + - keycloak + - mongo-dsk + - mongo-manifest + - postgresql + - rabbitmq + - redis + - vault + - vault_agent diff --git a/ansible/roles/helm_install/tasks/helm-install.yml b/ansible/roles/helm_install/tasks/helm-install.yml new file mode 100644 index 0000000..d057455 --- /dev/null +++ b/ansible/roles/helm_install/tasks/helm-install.yml @@ -0,0 +1,60 @@ +--- +- name: Create Helm temporary directory + file: + path: /tmp/helm + state: directory + mode: "0755" + +- name: Fetch Helm package + get_url: + url: 'https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz' + dest: /tmp/helm.tar.gz + checksum: '{{ helm_checksum }}' + +- name: Extract Helm package + unarchive: + remote_src: true + src: /tmp/helm.tar.gz + dest: /tmp/helm + +- name: Ensure "docker" group exists + group: + name: docker + state: present + become: true + +- name: Install helm to /usr/local/bin + copy: + remote_src: true + src: /tmp/helm/linux-amd64/helm + dest: /usr/local/bin/helm + owner: root + group: docker + mode: "0755" + become: true + +- name: Cleanup Helm temporary directory + file: + path: /tmp/helm + state: absent + +- name: Cleanup Helm temporary download + file: + path: 
/tmp/helm.tar.gz + state: absent + +- name: Ensure bash_completion.d directory exists + file: + path: /etc/bash_completion.d + state: directory + mode: "0755" + become: true + +- name: Setup Helm tab-completion + shell: | + set -o pipefail + /usr/local/bin/helm completion bash | tee /etc/bash_completion.d/helm + args: + executable: /bin/bash + changed_when: false + become: true diff --git a/ansible/roles/helm_install/tasks/main.yml b/ansible/roles/helm_install/tasks/main.yml new file mode 100644 index 0000000..53b32fe --- /dev/null +++ b/ansible/roles/helm_install/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- include: helm-install.yml + tags: helm-install + +- include: helm-chart-install.yml + tags: helm-chart-install diff --git a/ansible/roles/kubernetes_install/README.md b/ansible/roles/kubernetes_install/README.md new file mode 100644 index 0000000..225dd44 --- /dev/null +++ b/ansible/roles/kubernetes_install/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
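+
+Example Playbook (this repository)
+----------------------------------
+
+A minimal sketch of using this role together with the helm_install role from
+this patch; the host group name and namespace value are assumptions, while
+kubernetes_middleware_namespace is the variable consumed by helm_install:
+
+    - hosts: k8s_cluster
+      become: true
+      vars:
+        kubernetes_middleware_namespace: dsk-middle
+      roles:
+        - kubernetes_install
+        - helm_install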
diff --git a/ansible/roles/kubernetes_install/defaults/main.yml b/ansible/roles/kubernetes_install/defaults/main.yml new file mode 100644 index 0000000..44c7e04 --- /dev/null +++ b/ansible/roles/kubernetes_install/defaults/main.yml @@ -0,0 +1,131 @@ +helm_checksum: sha256:3156e4fe5f034e5b127cf165d61a8a1c48eb7a73b14689b273de5e6117df6fe2 +helm_version: v3.2.3 + +kubernetes_version: 1.25.2 + +kubernetes_kubelet_extra_args: "" +kubernetes_kubeadm_init_extra_opts: "" +kubernetes_join_command_extra_opts: "" + +kubernetes_pod_network: + cni: 'calico' + cidr: '10.96.0.0/12' + +kubernetes_calico_manifest_file: https://docs.projectcalico.org/manifests/calico.yaml + +containerd_config: + version: 2 + root: /var/lib/containerd + state: /run/containerd + plugin_dir: "" + disabled_plugins: [] + required_plugins: [] + oom_score: 0 + grpc: + address: /run/containerd/containerd.sock + tcp_address: "" + tcp_tls_cert: "" + tcp_tls_key: "" + uid: 0 + gid: 0 + max_recv_message_size: 16777216 + max_send_message_size: 16777216 + ttrpc: + address: "" + uid: 0 + gid: 0 + debug: + address: "" + uid: 0 + gid: 0 + level: "" + metrics: + address: "" + grpc_histogram: false + cgroup: + path: "" + timeouts: + "io.containerd.timeout.shim.cleanup": 5s + "io.containerd.timeout.shim.load": 5s + "io.containerd.timeout.shim.shutdown": 3s + "io.containerd.timeout.task.state": 2s + plugins: + "io.containerd.gc.v1.scheduler": + pause_threshold: 0.02 + deletion_threshold: 0 + mutation_threshold: 100 + schedule_delay: 0s + startup_delay: 100ms + "io.containerd.grpc.v1.cri": + disable_tcp_service: true + stream_server_address: 127.0.0.1 + stream_server_port: "0" + stream_idle_timeout: 4h0m0s + enable_selinux: false + sandbox_image: k8s.gcr.io/pause:3.1 + stats_collect_period: 10 + systemd_cgroup: false + enable_tls_streaming: false + max_container_log_line_size: 16384 + disable_cgroup: false + disable_apparmor: false + restrict_oom_score_adj: false + max_concurrent_downloads: 3 + disable_proc_mount: false + containerd: + snapshotter: overlayfs + default_runtime_name: runc + no_pivot: false + default_runtime: + runtime_type: "" + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + untrusted_workload_runtime: + runtime_type: "" + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + runtimes: + runc: + runtime_type: io.containerd.runc.v1 + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + cni: + bin_dir: /opt/cni/bin + conf_dir: /etc/cni/net.d + max_conf_num: 1 + conf_template: "" + registry: + mirrors: + "docker.io": + endpoint: + - https://registry-1.docker.io + x509_key_pair_streaming: + tls_cert_file: "" + tls_key_file: "" + "io.containerd.internal.v1.opt": + path: /opt/containerd + "io.containerd.internal.v1.restart": + interval: 10s + "io.containerd.metadata.v1.bolt": + content_sharing_policy: shared + "io.containerd.monitor.v1.cgroups": + no_prometheus: false + "io.containerd.runtime.v1.linux": + shim: containerd-shim + runtime: runc + runtime_root: "" + no_shim: false + shim_debug: false + "io.containerd.runtime.v2.task": + platforms: + - linux/amd64 + "io.containerd.service.v1.diff-service": + default: + - walking + "io.containerd.snapshotter.v1.devmapper": + root_path: "" + pool_name: "" + base_image_size: "" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/.helmignore b/ansible/roles/kubernetes_install/files/ingress-nginx/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ 
b/ansible/roles/kubernetes_install/files/ingress-nginx/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/CHANGELOG.md b/ansible/roles/kubernetes_install/files/ingress-nginx/CHANGELOG.md new file mode 100644 index 0000000..27a52e8 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/CHANGELOG.md @@ -0,0 +1,445 @@ +# Changelog + +This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org). + +### 4.2.1 + +- The sha of kube-webhook-certgen image & the opentelemetry image, in values file, was changed to new images built on alpine-v3.16.1 +- "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today" + +### 4.2.0 + +- Support for Kubernetes v1.19.0 was removed +- "[8810](https://github.com/kubernetes/ingress-nginx/pull/8810) Prepare for v1.3.0" +- "[8808](https://github.com/kubernetes/ingress-nginx/pull/8808) revert arch var name" +- "[8805](https://github.com/kubernetes/ingress-nginx/pull/8805) Bump k8s.io/klog/v2 from 2.60.1 to 2.70.1" +- "[8803](https://github.com/kubernetes/ingress-nginx/pull/8803) Update to nginx base with alpine v3.16" +- "[8802](https://github.com/kubernetes/ingress-nginx/pull/8802) chore: start v1.3.0 release process" +- "[8798](https://github.com/kubernetes/ingress-nginx/pull/8798) Add v1.24.0 to test matrix" +- "[8796](https://github.com/kubernetes/ingress-nginx/pull/8796) fix: add MAC_OS variable for static-check" +- "[8793](https://github.com/kubernetes/ingress-nginx/pull/8793) changed to alpine-v3.16" +- "[8781](https://github.com/kubernetes/ingress-nginx/pull/8781) Bump github.com/stretchr/testify from 1.7.5 to 1.8.0" +- "[8778](https://github.com/kubernetes/ingress-nginx/pull/8778) chore: remove stable.txt from release process" +- "[8775](https://github.com/kubernetes/ingress-nginx/pull/8775) Remove stable" +- "[8773](https://github.com/kubernetes/ingress-nginx/pull/8773) Bump github/codeql-action from 2.1.14 to 2.1.15" +- "[8772](https://github.com/kubernetes/ingress-nginx/pull/8772) Bump ossf/scorecard-action from 1.1.1 to 1.1.2" +- "[8771](https://github.com/kubernetes/ingress-nginx/pull/8771) fix bullet md format" +- "[8770](https://github.com/kubernetes/ingress-nginx/pull/8770) Add condition for monitoring.coreos.com/v1 API" +- "[8769](https://github.com/kubernetes/ingress-nginx/pull/8769) Fix typos and add links to developer guide" +- "[8767](https://github.com/kubernetes/ingress-nginx/pull/8767) change v1.2.0 to v1.2.1 in deploy doc URLs" +- "[8765](https://github.com/kubernetes/ingress-nginx/pull/8765) Bump github/codeql-action from 1.0.26 to 2.1.14" +- "[8752](https://github.com/kubernetes/ingress-nginx/pull/8752) Bump github.com/spf13/cobra from 1.4.0 to 1.5.0" +- "[8751](https://github.com/kubernetes/ingress-nginx/pull/8751) Bump github.com/stretchr/testify from 1.7.2 to 1.7.5" +- "[8750](https://github.com/kubernetes/ingress-nginx/pull/8750) added announcement" +- "[8740](https://github.com/kubernetes/ingress-nginx/pull/8740) change sha e2etestrunner and echoserver" +- 
"[8738](https://github.com/kubernetes/ingress-nginx/pull/8738) Update docs to make it easier for noobs to follow step by step" +- "[8737](https://github.com/kubernetes/ingress-nginx/pull/8737) updated baseimage sha" +- "[8736](https://github.com/kubernetes/ingress-nginx/pull/8736) set ld-musl-path" +- "[8733](https://github.com/kubernetes/ingress-nginx/pull/8733) feat: migrate leaderelection lock to leases" +- "[8726](https://github.com/kubernetes/ingress-nginx/pull/8726) prometheus metric: upstream_latency_seconds" +- "[8720](https://github.com/kubernetes/ingress-nginx/pull/8720) Ci pin deps" +- "[8719](https://github.com/kubernetes/ingress-nginx/pull/8719) Working OpenTelemetry sidecar (base nginx image)" +- "[8714](https://github.com/kubernetes/ingress-nginx/pull/8714) Create Openssf scorecard" +- "[8708](https://github.com/kubernetes/ingress-nginx/pull/8708) Bump github.com/prometheus/common from 0.34.0 to 0.35.0" +- "[8703](https://github.com/kubernetes/ingress-nginx/pull/8703) Bump actions/dependency-review-action from 1 to 2" +- "[8701](https://github.com/kubernetes/ingress-nginx/pull/8701) Fix several typos" +- "[8699](https://github.com/kubernetes/ingress-nginx/pull/8699) fix the gosec test and a make target for it" +- "[8698](https://github.com/kubernetes/ingress-nginx/pull/8698) Bump actions/upload-artifact from 2.3.1 to 3.1.0" +- "[8697](https://github.com/kubernetes/ingress-nginx/pull/8697) Bump actions/setup-go from 2.2.0 to 3.2.0" +- "[8695](https://github.com/kubernetes/ingress-nginx/pull/8695) Bump actions/download-artifact from 2 to 3" +- "[8694](https://github.com/kubernetes/ingress-nginx/pull/8694) Bump crazy-max/ghaction-docker-buildx from 1.6.2 to 3.3.1" + +### 4.1.2 + +- "[8587](https://github.com/kubernetes/ingress-nginx/pull/8587) Add CAP_SYS_CHROOT to DS/PSP when needed" +- "[8458](https://github.com/kubernetes/ingress-nginx/pull/8458) Add portNamePreffix Helm chart parameter" +- "[8522](https://github.com/kubernetes/ingress-nginx/pull/8522) Add documentation for controller.service.loadBalancerIP in Helm chart" + +### 4.1.0 + +- "[8481](https://github.com/kubernetes/ingress-nginx/pull/8481) Fix log creation in chroot script" +- "[8479](https://github.com/kubernetes/ingress-nginx/pull/8479) changed nginx base img tag to img built with alpine3.14.6" +- "[8478](https://github.com/kubernetes/ingress-nginx/pull/8478) update base images and protobuf gomod" +- "[8468](https://github.com/kubernetes/ingress-nginx/pull/8468) Fallback to ngx.var.scheme for redirectScheme with use-forward-headers when X-Forwarded-Proto is empty" +- "[8456](https://github.com/kubernetes/ingress-nginx/pull/8456) Implement object deep inspector" +- "[8455](https://github.com/kubernetes/ingress-nginx/pull/8455) Update dependencies" +- "[8454](https://github.com/kubernetes/ingress-nginx/pull/8454) Update index.md" +- "[8447](https://github.com/kubernetes/ingress-nginx/pull/8447) typo fixing" +- "[8446](https://github.com/kubernetes/ingress-nginx/pull/8446) Fix suggested annotation-value-word-blocklist" +- "[8444](https://github.com/kubernetes/ingress-nginx/pull/8444) replace deprecated topology key in example with current one" +- "[8443](https://github.com/kubernetes/ingress-nginx/pull/8443) Add dependency review enforcement" +- "[8434](https://github.com/kubernetes/ingress-nginx/pull/8434) added new auth-tls-match-cn annotation" +- "[8426](https://github.com/kubernetes/ingress-nginx/pull/8426) Bump github.com/prometheus/common from 0.32.1 to 0.33.0" + +### 4.0.18 + +- 
"[8291](https://github.com/kubernetes/ingress-nginx/pull/8291) remove git tag env from cloud build" +- "[8286](https://github.com/kubernetes/ingress-nginx/pull/8286) Fix OpenTelemetry sidecar image build" +- "[8277](https://github.com/kubernetes/ingress-nginx/pull/8277) Add OpenSSF Best practices badge" +- "[8273](https://github.com/kubernetes/ingress-nginx/pull/8273) Issue#8241" +- "[8267](https://github.com/kubernetes/ingress-nginx/pull/8267) Add fsGroup value to admission-webhooks/job-patch charts" +- "[8262](https://github.com/kubernetes/ingress-nginx/pull/8262) Updated confusing error" +- "[8256](https://github.com/kubernetes/ingress-nginx/pull/8256) fix: deny locations with invalid auth-url annotation" +- "[8253](https://github.com/kubernetes/ingress-nginx/pull/8253) Add a certificate info metric" +- "[8236](https://github.com/kubernetes/ingress-nginx/pull/8236) webhook: remove useless code." +- "[8227](https://github.com/kubernetes/ingress-nginx/pull/8227) Update libraries in webhook image" +- "[8225](https://github.com/kubernetes/ingress-nginx/pull/8225) fix inconsistent-label-cardinality for prometheus metrics: nginx_ingress_controller_requests" +- "[8221](https://github.com/kubernetes/ingress-nginx/pull/8221) Do not validate ingresses with unknown ingress class in admission webhook endpoint" +- "[8210](https://github.com/kubernetes/ingress-nginx/pull/8210) Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1" +- "[8209](https://github.com/kubernetes/ingress-nginx/pull/8209) Bump google.golang.org/grpc from 1.43.0 to 1.44.0" +- "[8204](https://github.com/kubernetes/ingress-nginx/pull/8204) Add Artifact Hub lint" +- "[8203](https://github.com/kubernetes/ingress-nginx/pull/8203) Fix Indentation of example and link to cert-manager tutorial" +- "[8201](https://github.com/kubernetes/ingress-nginx/pull/8201) feat(metrics): add path and method labels to requests countera" +- "[8199](https://github.com/kubernetes/ingress-nginx/pull/8199) use functional options to reduce number of methods creating an EchoDeployment" +- "[8196](https://github.com/kubernetes/ingress-nginx/pull/8196) docs: fix inconsistent controller annotation" +- "[8191](https://github.com/kubernetes/ingress-nginx/pull/8191) Using Go install for misspell" +- "[8186](https://github.com/kubernetes/ingress-nginx/pull/8186) prometheus+grafana using servicemonitor" +- "[8185](https://github.com/kubernetes/ingress-nginx/pull/8185) Append elements on match, instead of removing for cors-annotations" +- "[8179](https://github.com/kubernetes/ingress-nginx/pull/8179) Bump github.com/opencontainers/runc from 1.0.3 to 1.1.0" +- "[8173](https://github.com/kubernetes/ingress-nginx/pull/8173) Adding annotations to the controller service account" +- "[8163](https://github.com/kubernetes/ingress-nginx/pull/8163) Update the $req_id placeholder description" +- "[8162](https://github.com/kubernetes/ingress-nginx/pull/8162) Versioned static manifests" +- "[8159](https://github.com/kubernetes/ingress-nginx/pull/8159) Adding some geoip variables and default values" +- "[8155](https://github.com/kubernetes/ingress-nginx/pull/8155) #7271 feat: avoid-pdb-creation-when-default-backend-disabled-and-replicas-gt-1" +- "[8151](https://github.com/kubernetes/ingress-nginx/pull/8151) Automatically generate helm docs" +- "[8143](https://github.com/kubernetes/ingress-nginx/pull/8143) Allow to configure delay before controller exits" +- "[8136](https://github.com/kubernetes/ingress-nginx/pull/8136) add ingressClass option to helm chart - back 
compatibility with ingress.class annotations" +- "[8126](https://github.com/kubernetes/ingress-nginx/pull/8126) Example for JWT" + + +### 4.0.15 + +- [8120](https://github.com/kubernetes/ingress-nginx/pull/8120) Update go in runner and release v1.1.1 +- [8119](https://github.com/kubernetes/ingress-nginx/pull/8119) Update to go v1.17.6 +- [8118](https://github.com/kubernetes/ingress-nginx/pull/8118) Remove deprecated libraries, update other libs +- [8117](https://github.com/kubernetes/ingress-nginx/pull/8117) Fix codegen errors +- [8115](https://github.com/kubernetes/ingress-nginx/pull/8115) chart/ghaction: set the correct permission to have access to push a release +- [8098](https://github.com/kubernetes/ingress-nginx/pull/8098) generating SHA for CA only certs in backend_ssl.go + comparision of P… +- [8088](https://github.com/kubernetes/ingress-nginx/pull/8088) Fix Edit this page link to use main branch +- [8072](https://github.com/kubernetes/ingress-nginx/pull/8072) Expose GeoIP2 Continent code as variable +- [8061](https://github.com/kubernetes/ingress-nginx/pull/8061) docs(charts): using helm-docs for chart +- [8058](https://github.com/kubernetes/ingress-nginx/pull/8058) Bump github.com/spf13/cobra from 1.2.1 to 1.3.0 +- [8054](https://github.com/kubernetes/ingress-nginx/pull/8054) Bump google.golang.org/grpc from 1.41.0 to 1.43.0 +- [8051](https://github.com/kubernetes/ingress-nginx/pull/8051) align bug report with feature request regarding kind documentation +- [8046](https://github.com/kubernetes/ingress-nginx/pull/8046) Report expired certificates (#8045) +- [8044](https://github.com/kubernetes/ingress-nginx/pull/8044) remove G109 check till gosec resolves issues +- [8042](https://github.com/kubernetes/ingress-nginx/pull/8042) docs_multiple_instances_one_cluster_ticket_7543 +- [8041](https://github.com/kubernetes/ingress-nginx/pull/8041) docs: fix typo'd executible name +- [8035](https://github.com/kubernetes/ingress-nginx/pull/8035) Comment busy owners +- [8029](https://github.com/kubernetes/ingress-nginx/pull/8029) Add stream-snippet as a ConfigMap and Annotation option +- [8023](https://github.com/kubernetes/ingress-nginx/pull/8023) fix nginx compilation flags +- [8021](https://github.com/kubernetes/ingress-nginx/pull/8021) Disable default modsecurity_rules_file if modsecurity-snippet is specified +- [8019](https://github.com/kubernetes/ingress-nginx/pull/8019) Revise main documentation page +- [8018](https://github.com/kubernetes/ingress-nginx/pull/8018) Preserve order of plugin invocation +- [8015](https://github.com/kubernetes/ingress-nginx/pull/8015) Add newline indenting to admission webhook annotations +- [8014](https://github.com/kubernetes/ingress-nginx/pull/8014) Add link to example error page manifest in docs +- [8009](https://github.com/kubernetes/ingress-nginx/pull/8009) Fix spelling in documentation and top-level files +- [8008](https://github.com/kubernetes/ingress-nginx/pull/8008) Add relabelings in controller-servicemonitor.yaml +- [8003](https://github.com/kubernetes/ingress-nginx/pull/8003) Minor improvements (formatting, consistency) in install guide +- [8001](https://github.com/kubernetes/ingress-nginx/pull/8001) fix: go-grpc Dockerfile +- [7999](https://github.com/kubernetes/ingress-nginx/pull/7999) images: use k8s-staging-test-infra/gcb-docker-gcloud +- [7996](https://github.com/kubernetes/ingress-nginx/pull/7996) doc: improvement +- [7983](https://github.com/kubernetes/ingress-nginx/pull/7983) Fix a couple of misspellings in the annotations documentation. 
+- [7979](https://github.com/kubernetes/ingress-nginx/pull/7979) allow set annotations for admission Jobs +- [7977](https://github.com/kubernetes/ingress-nginx/pull/7977) Add ssl_reject_handshake to defaul server +- [7975](https://github.com/kubernetes/ingress-nginx/pull/7975) add legacy version update v0.50.0 to main changelog +- [7972](https://github.com/kubernetes/ingress-nginx/pull/7972) updated service upstream definition + +### 4.0.14 + +- [8061](https://github.com/kubernetes/ingress-nginx/pull/8061) Using helm-docs to populate values table in README.md + +### 4.0.13 + +- [8008](https://github.com/kubernetes/ingress-nginx/pull/8008) Add relabelings in controller-servicemonitor.yaml + +### 4.0.12 + +- [7978](https://github.com/kubernetes/ingress-nginx/pull/7979) Support custom annotations in admissions Jobs + +### 4.0.11 + +- [7873](https://github.com/kubernetes/ingress-nginx/pull/7873) Makes the [appProtocol](https://kubernetes.io/docs/concepts/services-networking/_print/#application-protocol) field optional. + +### 4.0.10 + +- [7964](https://github.com/kubernetes/ingress-nginx/pull/7964) Update controller version to v1.1.0 + +### 4.0.9 + +- [6992](https://github.com/kubernetes/ingress-nginx/pull/6992) Add ability to specify labels for all resources + +### 4.0.7 + +- [7923](https://github.com/kubernetes/ingress-nginx/pull/7923) Release v1.0.5 of ingress-nginx +- [7806](https://github.com/kubernetes/ingress-nginx/pull/7806) Choice option for internal/external loadbalancer type service + +### 4.0.6 + +- [7804](https://github.com/kubernetes/ingress-nginx/pull/7804) Release v1.0.4 of ingress-nginx +- [7651](https://github.com/kubernetes/ingress-nginx/pull/7651) Support ipFamilyPolicy and ipFamilies fields in Helm Chart +- [7798](https://github.com/kubernetes/ingress-nginx/pull/7798) Exoscale: use HTTP Healthcheck mode +- [7793](https://github.com/kubernetes/ingress-nginx/pull/7793) Update kube-webhook-certgen to v1.1.1 + +### 4.0.5 + +- [7740](https://github.com/kubernetes/ingress-nginx/pull/7740) Release v1.0.3 of ingress-nginx + +### 4.0.3 + +- [7707](https://github.com/kubernetes/ingress-nginx/pull/7707) Release v1.0.2 of ingress-nginx + +### 4.0.2 + +- [7681](https://github.com/kubernetes/ingress-nginx/pull/7681) Release v1.0.1 of ingress-nginx + +### 4.0.1 + +- [7535](https://github.com/kubernetes/ingress-nginx/pull/7535) Release v1.0.0 ingress-nginx + +### 3.34.0 + +- [7256](https://github.com/kubernetes/ingress-nginx/pull/7256) Add namespace field in the namespace scoped resource templates + +### 3.33.0 + +- [7164](https://github.com/kubernetes/ingress-nginx/pull/7164) Update nginx to v1.20.1 + +### 3.32.0 + +- [7117](https://github.com/kubernetes/ingress-nginx/pull/7117) Add annotations for HPA + +### 3.31.0 + +- [7137](https://github.com/kubernetes/ingress-nginx/pull/7137) Add support for custom probes + +### 3.30.0 + +- [#7092](https://github.com/kubernetes/ingress-nginx/pull/7092) Removes the possibility of using localhost in ExternalNames as endpoints + +### 3.29.0 + +- [X] [#6945](https://github.com/kubernetes/ingress-nginx/pull/7020) Add option to specify job label for ServiceMonitor + +### 3.28.0 + +- [ ] [#6900](https://github.com/kubernetes/ingress-nginx/pull/6900) Support existing PSPs + +### 3.27.0 + +- Update ingress-nginx v0.45.0 + +### 3.26.0 + +- [X] [#6979](https://github.com/kubernetes/ingress-nginx/pull/6979) Changed servicePort value for metrics + +### 3.25.0 + +- [X] [#6957](https://github.com/kubernetes/ingress-nginx/pull/6957) Add ability to specify automountServiceAccountToken + 
+### 3.24.0 + +- [X] [#6908](https://github.com/kubernetes/ingress-nginx/pull/6908) Add volumes to default-backend deployment + +### 3.23.0 + +- Update ingress-nginx v0.44.0 + +### 3.22.0 + +- [X] [#6802](https://github.com/kubernetes/ingress-nginx/pull/6802) Add value for configuring a custom Diffie-Hellman parameters file +- [X] [#6815](https://github.com/kubernetes/ingress-nginx/pull/6815) Allow use of numeric namespaces in helm chart + +### 3.21.0 + +- [X] [#6783](https://github.com/kubernetes/ingress-nginx/pull/6783) Add custom annotations to ScaledObject +- [X] [#6761](https://github.com/kubernetes/ingress-nginx/pull/6761) Adding quotes in the serviceAccount name in Helm values +- [X] [#6767](https://github.com/kubernetes/ingress-nginx/pull/6767) Remove ClusterRole when scope option is enabled +- [X] [#6785](https://github.com/kubernetes/ingress-nginx/pull/6785) Update kube-webhook-certgen image to v1.5.1 + +### 3.20.1 + +- Do not create KEDA in case of DaemonSets. +- Fix KEDA v2 definition + +### 3.20.0 + +- [X] [#6730](https://github.com/kubernetes/ingress-nginx/pull/6730) Do not create HPA for defaultBackend if not enabled. + +### 3.19.0 + +- Update ingress-nginx v0.43.0 + +### 3.18.0 + +- [X] [#6688](https://github.com/kubernetes/ingress-nginx/pull/6688) Allow volume-type emptyDir in controller podsecuritypolicy +- [X] [#6691](https://github.com/kubernetes/ingress-nginx/pull/6691) Improve parsing of helm parameters + +### 3.17.0 + +- Update ingress-nginx v0.42.0 + +### 3.16.1 + +- Fix chart-releaser action + +### 3.16.0 + +- [X] [#6646](https://github.com/kubernetes/ingress-nginx/pull/6646) Added LoadBalancerIP value for internal service + +### 3.15.1 + +- Fix chart-releaser action + +### 3.15.0 + +- [X] [#6586](https://github.com/kubernetes/ingress-nginx/pull/6586) Fix 'maxmindLicenseKey' location in values.yaml + +### 3.14.0 + +- [X] [#6469](https://github.com/kubernetes/ingress-nginx/pull/6469) Allow custom service names for controller and backend + +### 3.13.0 + +- [X] [#6544](https://github.com/kubernetes/ingress-nginx/pull/6544) Fix default backend HPA name variable + +### 3.12.0 + +- [X] [#6514](https://github.com/kubernetes/ingress-nginx/pull/6514) Remove helm2 support and update docs + +### 3.11.1 + +- [X] [#6505](https://github.com/kubernetes/ingress-nginx/pull/6505) Reorder HPA resource list to work with GitOps tooling + +### 3.11.0 + +- Support Keda Autoscaling + +### 3.10.1 + +- Fix regression introduced in 0.41.0 with external authentication + +### 3.10.0 + +- Fix routing regression introduced in 0.41.0 with PathType Exact + +### 3.9.0 + +- [X] [#6423](https://github.com/kubernetes/ingress-nginx/pull/6423) Add Default backend HPA autoscaling + +### 3.8.0 + +- [X] [#6395](https://github.com/kubernetes/ingress-nginx/pull/6395) Update jettech/kube-webhook-certgen image +- [X] [#6377](https://github.com/kubernetes/ingress-nginx/pull/6377) Added loadBalancerSourceRanges for internal lbs +- [X] [#6356](https://github.com/kubernetes/ingress-nginx/pull/6356) Add securitycontext settings on defaultbackend +- [X] [#6401](https://github.com/kubernetes/ingress-nginx/pull/6401) Fix controller service annotations +- [X] [#6403](https://github.com/kubernetes/ingress-nginx/pull/6403) Initial helm chart changelog + +### 3.7.1 + +- [X] [#6326](https://github.com/kubernetes/ingress-nginx/pull/6326) Fix liveness and readiness probe path in daemonset chart + +### 3.7.0 + +- [X] [#6316](https://github.com/kubernetes/ingress-nginx/pull/6316) Numerals in podAnnotations in quotes 
[#6315](https://github.com/kubernetes/ingress-nginx/issues/6315) + +### 3.6.0 + +- [X] [#6305](https://github.com/kubernetes/ingress-nginx/pull/6305) Add default linux nodeSelector + +### 3.5.1 + +- [X] [#6299](https://github.com/kubernetes/ingress-nginx/pull/6299) Fix helm chart release + +### 3.5.0 + +- [X] [#6260](https://github.com/kubernetes/ingress-nginx/pull/6260) Allow Helm Chart to customize admission webhook's annotations, timeoutSeconds, namespaceSelector, objectSelector and cert files locations + +### 3.4.0 + +- [X] [#6268](https://github.com/kubernetes/ingress-nginx/pull/6268) Update to 0.40.2 in helm chart #6288 + +### 3.3.1 + +- [X] [#6259](https://github.com/kubernetes/ingress-nginx/pull/6259) Release helm chart +- [X] [#6258](https://github.com/kubernetes/ingress-nginx/pull/6258) Fix chart markdown link +- [X] [#6253](https://github.com/kubernetes/ingress-nginx/pull/6253) Release v0.40.0 +- [X] [#6233](https://github.com/kubernetes/ingress-nginx/pull/6233) Add admission controller e2e test + +### 3.3.0 + +- [X] [#6203](https://github.com/kubernetes/ingress-nginx/pull/6203) Refactor parsing of key values +- [X] [#6162](https://github.com/kubernetes/ingress-nginx/pull/6162) Add helm chart options to expose metrics service as NodePort +- [X] [#6180](https://github.com/kubernetes/ingress-nginx/pull/6180) Fix helm chart admissionReviewVersions regression +- [X] [#6169](https://github.com/kubernetes/ingress-nginx/pull/6169) Fix Typo in example prometheus rules + +### 3.0.0 + +- [X] [#6167](https://github.com/kubernetes/ingress-nginx/pull/6167) Update chart requirements + +### 2.16.0 + +- [X] [#6154](https://github.com/kubernetes/ingress-nginx/pull/6154) add `topologySpreadConstraint` to controller + +### 2.15.0 + +- [X] [#6087](https://github.com/kubernetes/ingress-nginx/pull/6087) Adding parameter for externalTrafficPolicy in internal controller service spec + +### 2.14.0 + +- [X] [#6104](https://github.com/kubernetes/ingress-nginx/pull/6104) Misc fixes for nginx-ingress chart for better keel and prometheus-operator integration + +### 2.13.0 + +- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0 +- [X] [#6080](https://github.com/kubernetes/ingress-nginx/pull/6080) Switch images to k8s.gcr.io after Vanity Domain Flip + +### 2.12.1 + +- [X] [#6075](https://github.com/kubernetes/ingress-nginx/pull/6075) Sync helm chart affinity examples + +### 2.12.0 + +- [X] [#6039](https://github.com/kubernetes/ingress-nginx/pull/6039) Add configurable serviceMonitor metricRelabelling and targetLabels +- [X] [#6044](https://github.com/kubernetes/ingress-nginx/pull/6044) Fix YAML linting + +### 2.11.3 + +- [X] [#6038](https://github.com/kubernetes/ingress-nginx/pull/6038) Bump chart version PATCH + +### 2.11.2 + +- [X] [#5951](https://github.com/kubernetes/ingress-nginx/pull/5951) Bump chart patch version + +### 2.11.1 + +- [X] [#5900](https://github.com/kubernetes/ingress-nginx/pull/5900) Release helm chart for v0.34.1 + +### 2.11.0 + +- [X] [#5879](https://github.com/kubernetes/ingress-nginx/pull/5879) Update helm chart for v0.34.0 +- [X] [#5671](https://github.com/kubernetes/ingress-nginx/pull/5671) Make liveness probe more fault tolerant than readiness probe + +### 2.10.0 + +- [X] [#5843](https://github.com/kubernetes/ingress-nginx/pull/5843) Update jettech/kube-webhook-certgen image + +### 2.9.1 + +- [X] 
[#5823](https://github.com/kubernetes/ingress-nginx/pull/5823) Add quoting to sysctls because numeric values need to be presented as strings (#5823) + +### 2.9.0 + +- [X] [#5795](https://github.com/kubernetes/ingress-nginx/pull/5795) Use fully qualified images to avoid cri-o issues + + +### TODO + +Keep building the changelog using *git log charts*, checking the tags diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/Chart.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/Chart.yaml new file mode 100644 index 0000000..55c0b54 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + artifacthub.io/changes: | + - "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today" + - "fix permissions about configmap" + artifacthub.io/prerelease: "false" +apiVersion: v2 +appVersion: 1.3.1 +description: Ingress controller for Kubernetes using NGINX as a reverse proxy and + load balancer +home: https://github.com/kubernetes/ingress-nginx +icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png +keywords: +- ingress +- nginx +kubeVersion: '>=1.20.0-0' +maintainers: +- name: rikatz +- name: strongjz +- name: tao12345666333 +name: ingress-nginx +sources: +- https://github.com/kubernetes/ingress-nginx +version: 4.2.5 diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/OWNERS b/ansible/roles/kubernetes_install/files/ingress-nginx/OWNERS new file mode 100644 index 0000000..6b7e049 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md + +approvers: +- ingress-nginx-helm-maintainers + +reviewers: +- ingress-nginx-helm-reviewers + +labels: +- area/helm diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/README.md b/ansible/roles/kubernetes_install/files/ingress-nginx/README.md new file mode 100644 index 0000000..4e6a696 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/README.md @@ -0,0 +1,494 @@ +# ingress-nginx + +[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + +![Version: 4.2.5](https://img.shields.io/badge/Version-4.2.5-informational?style=flat-square) ![AppVersion: 1.3.1](https://img.shields.io/badge/AppVersion-1.3.1-informational?style=flat-square) + +To use, add the `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Chart version 3.x.x: Kubernetes v1.16+ +- Chart version 4.x.x and above: Kubernetes v1.19+ + +## Get Repo Info + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +``` + +## Install Chart + +**Important:** only Helm 3 is supported + +```console +helm install [RELEASE_NAME] ingress-nginx/ingress-nginx +``` + +The command deploys ingress-nginx on the Kubernetes cluster in the default configuration. 
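+ +For example, a concrete invocation (the release name and namespace shown here are only illustrative) might be: + +```console +helm install ingress-nginx ingress-nginx/ingress-nginx --namespace ingress-nginx --create-namespace +```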
+ +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### Upgrading With Zero Downtime in Production + +By default the ingress-nginx controller has service interruptions whenever its pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8). + +### Migrating from stable/nginx-ingress + +There are two main ways to migrate a release from `stable/nginx-ingress` to the `ingress-nginx/ingress-nginx` chart: + +1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one +1. For critical services in production that require zero-downtime, you will want to: + 1. [Install](#install-chart) a second Ingress controller + 1. Redirect your DNS traffic from the old controller to the new controller + 1. Log traffic from both controllers during this changeover + 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it + 1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production) + +Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values ingress-nginx/ingress-nginx +``` + +### PodDisruptionBudget + +Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, +else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. + +### Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics by setting `controller.metrics.enabled` to `true`. + +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. +Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled` and set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`; "release=prometheus" should match the label configured in the prometheus servicemonitor (see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`).
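+ +A minimal values sketch for this (assuming your Prometheus Operator release carries the `release=prometheus` label) could look like: + +```yaml +controller: + metrics: + enabled: true + serviceMonitor: + enabled: true + additionalLabels: + # must match the serviceMonitorSelector of your Prometheus Operator (assumption) + release: "prometheus" +```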
"release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`) + +### ingress-nginx nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: + +- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. + You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server + +### ExternalDNS Service Configuration + +Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. +``` + +### AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +### AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +### Additional Internal Load Balancer + +This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application. + +By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL. + +You'll need to set both the following values: + +`controller.service.internal.enabled` +`controller.service.internal.annotations` + +If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken. + +`controller.service.internal.annotations` varies with the cloud service you're using. + +Example for AWS: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal ELB + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + # Any other annotation can be declared here. 
+``` + +Example for GCE: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing + # For GKE versions 1.17 and later + networking.gke.io/load-balancer-type: "Internal" + # For earlier versions + # cloud.google.com/load-balancer-type: "Internal" + + # Any other annotation can be declared here. +``` + +Example for Azure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for Oracle Cloud Infrastructure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/oci-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +A use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object. + +Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`. + +### Ingress Admission Webhooks + +With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. +**This feature is enabled by default since 0.31.0.** + +nginx-ingress-controller 0.25.* works only with Kubernetes 1.14+; 0.26 fixes [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) + +### Helm Error When Upgrading: spec.clusterIP: Invalid value: "" + +If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: + +```console +Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +Details of how and why are in [this issue](https://github.com/helm/charts/pull/13646), but to resolve this you can set `xxxx.service.omitClusterIP` to `true`, where `xxxx` is the service referenced in the error. + +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. 
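+ +As a hedged sketch of that workaround (the release name is a placeholder, and `controller` stands in for whichever service your error actually names): + +```console +helm upgrade [RELEASE_NAME] ingress-nginx/ingress-nginx --set controller.service.omitClusterIP=true +```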
+ +## Requirements + +Kubernetes: `>=1.20.0-0` + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| commonLabels | object | `{}` | | +| controller.addHeaders | object | `{}` | Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers | +| controller.admissionWebhooks.annotations | object | `{}` | | +| controller.admissionWebhooks.certificate | string | `"/usr/local/certificates/cert"` | | +| controller.admissionWebhooks.createSecretJob.resources | object | `{}` | | +| controller.admissionWebhooks.enabled | bool | `true` | | +| controller.admissionWebhooks.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| controller.admissionWebhooks.extraEnvs | list | `[]` | Additional environment variables to set | +| controller.admissionWebhooks.failurePolicy | string | `"Fail"` | Admission Webhook failure policy to use | +| controller.admissionWebhooks.key | string | `"/usr/local/certificates/key"` | | +| controller.admissionWebhooks.labels | object | `{}` | Labels to be added to admission webhooks | +| controller.admissionWebhooks.namespaceSelector | object | `{}` | | +| controller.admissionWebhooks.networkPolicyEnabled | bool | `false` | | +| controller.admissionWebhooks.objectSelector | object | `{}` | | +| controller.admissionWebhooks.patch.enabled | bool | `true` | | +| controller.admissionWebhooks.patch.image.digest | string | `"sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47"` | | +| controller.admissionWebhooks.patch.image.image | string | `"ingress-nginx/kube-webhook-certgen"` | | +| controller.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.admissionWebhooks.patch.image.registry | string | `"registry.k8s.io"` | | +| controller.admissionWebhooks.patch.image.tag | string | `"v1.3.0"` | | +| controller.admissionWebhooks.patch.labels | object | `{}` | Labels to be added to patch job resources | +| controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os" | string | `"linux"` | | +| controller.admissionWebhooks.patch.podAnnotations | object | `{}` | | +| controller.admissionWebhooks.patch.priorityClassName | string | `""` | Provide a priority class name to the webhook patching job # | +| controller.admissionWebhooks.patch.securityContext.fsGroup | int | `2000` | | +| controller.admissionWebhooks.patch.securityContext.runAsNonRoot | bool | `true` | | +| controller.admissionWebhooks.patch.securityContext.runAsUser | int | `2000` | | +| controller.admissionWebhooks.patch.tolerations | list | `[]` | | +| controller.admissionWebhooks.patchWebhookJob.resources | object | `{}` | | +| controller.admissionWebhooks.port | int | `8443` | | +| controller.admissionWebhooks.service.annotations | object | `{}` | | +| controller.admissionWebhooks.service.externalIPs | list | `[]` | | +| controller.admissionWebhooks.service.loadBalancerSourceRanges | list | `[]` | | +| controller.admissionWebhooks.service.servicePort | int | `443` | | +| controller.admissionWebhooks.service.type | string | `"ClusterIP"` | | +| controller.affinity | object | `{}` | Affinity and anti-affinity rules for server scheduling to nodes # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity # | +| controller.allowSnippetAnnotations | bool | `true` | This configuration defines if Ingress Controller should allow users to set their own 
*-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected | +| controller.annotations | object | `{}` | Annotations to be added to the controller Deployment or DaemonSet # | +| controller.autoscaling.behavior | object | `{}` | | +| controller.autoscaling.enabled | bool | `false` | | +| controller.autoscaling.maxReplicas | int | `11` | | +| controller.autoscaling.minReplicas | int | `1` | | +| controller.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| controller.autoscaling.targetMemoryUtilizationPercentage | int | `50` | | +| controller.autoscalingTemplate | list | `[]` | | +| controller.config | object | `{}` | Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ | +| controller.configAnnotations | object | `{}` | Annotations to be added to the controller config configuration configmap. | +| controller.configMapNamespace | string | `""` | Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) | +| controller.containerName | string | `"controller"` | Configures the controller container name | +| controller.containerPort | object | `{"http":80,"https":443}` | Configures the ports that the nginx-controller listens on | +| controller.customTemplate.configMapKey | string | `""` | | +| controller.customTemplate.configMapName | string | `""` | | +| controller.dnsConfig | object | `{}` | Optionally customize the pod dnsConfig. | +| controller.dnsPolicy | string | `"ClusterFirst"` | Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. | +| controller.electionID | string | `"ingress-controller-leader"` | Election ID to use for status update | +| controller.enableMimalloc | bool | `true` | Enable mimalloc as a drop-in replacement for malloc. # ref: https://github.com/microsoft/mimalloc # | +| controller.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| controller.extraArgs | object | `{}` | Additional command line arguments to pass to nginx-ingress-controller E.g. to specify the default SSL certificate you can use | +| controller.extraContainers | list | `[]` | Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. | +| controller.extraEnvs | list | `[]` | Additional environment variables to set | +| controller.extraInitContainers | list | `[]` | Containers, which are run before the app containers are started. | +| controller.extraModules | list | `[]` | | +| controller.extraVolumeMounts | list | `[]` | Additional volumeMounts to the controller main container. | +| controller.extraVolumes | list | `[]` | Additional volumes to the controller pod. | +| controller.healthCheckHost | string | `""` | Address to bind the health check endpoint. It is better to set this option to the internal node address if the ingress nginx controller is running in the `hostNetwork: true` mode. | +| controller.healthCheckPath | string | `"/healthz"` | Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. 
| +| controller.hostNetwork | bool | `false` | Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged | +| controller.hostPort.enabled | bool | `false` | Enable 'hostPort' or not | +| controller.hostPort.ports.http | int | `80` | 'hostPort' http port | +| controller.hostPort.ports.https | int | `443` | 'hostPort' https port | +| controller.hostname | object | `{}` | Optionally customize the pod hostname. | +| controller.image.allowPrivilegeEscalation | bool | `true` | | +| controller.image.chroot | bool | `false` | | +| controller.image.digest | string | `"sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974"` | | +| controller.image.digestChroot | string | `"sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1"` | | +| controller.image.image | string | `"ingress-nginx/controller"` | | +| controller.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.image.registry | string | `"registry.k8s.io"` | | +| controller.image.runAsUser | int | `101` | | +| controller.image.tag | string | `"v1.3.1"` | | +| controller.ingressClass | string | `"nginx"` | For backwards compatibility with ingress.class annotation, use ingressClass. Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation | +| controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). | +| controller.ingressClassResource.controllerValue | string | `"k8s.io/ingress-nginx"` | Controller-value of the controller that is processing this ingressClass | +| controller.ingressClassResource.default | bool | `false` | Is this the default ingressClass for the cluster | +| controller.ingressClassResource.enabled | bool | `true` | Is this ingressClass enabled or not | +| controller.ingressClassResource.name | string | `"nginx"` | Name of the ingressClass | +| controller.ingressClassResource.parameters | object | `{}` | Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters. | +| controller.keda.apiVersion | string | `"keda.sh/v1alpha1"` | | +| controller.keda.behavior | object | `{}` | | +| controller.keda.cooldownPeriod | int | `300` | | +| controller.keda.enabled | bool | `false` | | +| controller.keda.maxReplicas | int | `11` | | +| controller.keda.minReplicas | int | `1` | | +| controller.keda.pollingInterval | int | `30` | | +| controller.keda.restoreToOriginalReplicaCount | bool | `false` | | +| controller.keda.scaledObject.annotations | object | `{}` | | +| controller.keda.triggers | list | `[]` | | +| controller.kind | string | `"Deployment"` | Use a `DaemonSet` or `Deployment` | +| controller.labels | object | `{}` | Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels # | +| controller.lifecycle | object | `{"preStop":{"exec":{"command":["/wait-shutdown"]}}}` | Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. 
To effectively take advantage of this feature, the ConfigMap option `worker-shutdown-timeout` now defaults to 240s instead of 10s. # | +| controller.livenessProbe.failureThreshold | int | `5` | | +| controller.livenessProbe.httpGet.path | string | `"/healthz"` | | +| controller.livenessProbe.httpGet.port | int | `10254` | | +| controller.livenessProbe.httpGet.scheme | string | `"HTTP"` | | +| controller.livenessProbe.initialDelaySeconds | int | `10` | | +| controller.livenessProbe.periodSeconds | int | `10` | | +| controller.livenessProbe.successThreshold | int | `1` | | +| controller.livenessProbe.timeoutSeconds | int | `1` | | +| controller.maxmindLicenseKey | string | `""` | Maxmind license key to download GeoLite2 Databases. # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases | +| controller.metrics.enabled | bool | `false` | | +| controller.metrics.port | int | `10254` | | +| controller.metrics.prometheusRule.additionalLabels | object | `{}` | | +| controller.metrics.prometheusRule.enabled | bool | `false` | | +| controller.metrics.prometheusRule.rules | list | `[]` | | +| controller.metrics.service.annotations | object | `{}` | | +| controller.metrics.service.externalIPs | list | `[]` | List of IP addresses at which the stats-exporter service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| controller.metrics.service.loadBalancerSourceRanges | list | `[]` | | +| controller.metrics.service.servicePort | int | `10254` | | +| controller.metrics.service.type | string | `"ClusterIP"` | | +| controller.metrics.serviceMonitor.additionalLabels | object | `{}` | | +| controller.metrics.serviceMonitor.enabled | bool | `false` | | +| controller.metrics.serviceMonitor.metricRelabelings | list | `[]` | | +| controller.metrics.serviceMonitor.namespace | string | `""` | | +| controller.metrics.serviceMonitor.namespaceSelector | object | `{}` | | +| controller.metrics.serviceMonitor.relabelings | list | `[]` | | +| controller.metrics.serviceMonitor.scrapeInterval | string | `"30s"` | | +| controller.metrics.serviceMonitor.targetLabels | list | `[]` | | +| controller.minAvailable | int | `1` | | +| controller.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready # | +| controller.name | string | `"controller"` | | +| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # | +| controller.podAnnotations | object | `{}` | Annotations to be added to controller pods # | +| controller.podLabels | object | `{}` | Labels to add to the pod container metadata | +| controller.podSecurityContext | object | `{}` | Security Context policies for controller pods | +| controller.priorityClassName | string | `""` | | +| controller.proxySetHeaders | object | `{}` | Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers | +| controller.publishService | object | `{"enabled":true,"pathOverride":""}` | Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disabled, the status field reports the IP address of the node or nodes where an ingress controller pod is running. 
| controller.publishService.enabled | bool | `true` | Enable 'publishService' or not | +| controller.publishService.pathOverride | string | `""` | Allows overriding of the publish service to bind to. Must be / | +| controller.readinessProbe.failureThreshold | int | `3` | | +| controller.readinessProbe.httpGet.path | string | `"/healthz"` | | +| controller.readinessProbe.httpGet.port | int | `10254` | | +| controller.readinessProbe.httpGet.scheme | string | `"HTTP"` | | +| controller.readinessProbe.initialDelaySeconds | int | `10` | | +| controller.readinessProbe.periodSeconds | int | `10` | | +| controller.readinessProbe.successThreshold | int | `1` | | +| controller.readinessProbe.timeoutSeconds | int | `1` | | +| controller.replicaCount | int | `1` | | +| controller.reportNodeInternalIp | bool | `false` | Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network; the default --publish-service flag used in standard cloud setups does not apply | +| controller.resources.requests.cpu | string | `"100m"` | | +| controller.resources.requests.memory | string | `"90Mi"` | | +| controller.scope.enabled | bool | `false` | Enable 'scope' or not | +| controller.scope.namespace | string | `""` | Namespace to limit the controller to; defaults to $(POD_NAMESPACE) | +| controller.scope.namespaceSelector | string | `""` | When scope.enabled == false, instead of watching all namespaces, only namespaces whose labels match the namespaceSelector are watched. Format like foo=bar. Defaults to empty, meaning all namespaces are watched. | +| controller.service.annotations | object | `{}` | | +| controller.service.appProtocol | bool | `true` | If enabled, adds an appProtocol option to the Kubernetes service. The appProtocol field replaces the annotations that were used for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http It allows choosing the protocol for each backend specified in the Kubernetes service. See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 Will be ignored for Kubernetes versions older than 1.20 # | +| controller.service.enableHttp | bool | `true` | | +| controller.service.enableHttps | bool | `true` | | +| controller.service.enabled | bool | `true` | | +| controller.service.external.enabled | bool | `true` | | +| controller.service.externalIPs | list | `[]` | List of IP addresses at which the controller services are available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| controller.service.internal.annotations | object | `{}` | Annotations are mandatory for the load balancer to come up. Varies with the cloud service. | +| controller.service.internal.enabled | bool | `false` | Enables an additional internal load balancer (besides the external one). | +| controller.service.internal.loadBalancerSourceRanges | list | `[]` | Restrict access for LoadBalancer service. Defaults to 0.0.0.0/0. | +| controller.service.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. 
# Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ | +| controller.service.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack-ness requested or required by this Service. Possible values are SingleStack, PreferDualStack or RequireDualStack. The ipFamilies and clusterIPs fields depend on the value of this field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ | +| controller.service.labels | object | `{}` | | +| controller.service.loadBalancerIP | string | `""` | Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer | +| controller.service.loadBalancerSourceRanges | list | `[]` | | +| controller.service.nodePorts.http | string | `""` | | +| controller.service.nodePorts.https | string | `""` | | +| controller.service.nodePorts.tcp | object | `{}` | | +| controller.service.nodePorts.udp | object | `{}` | | +| controller.service.ports.http | int | `80` | | +| controller.service.ports.https | int | `443` | | +| controller.service.targetPorts.http | string | `"http"` | | +| controller.service.targetPorts.https | string | `"https"` | | +| controller.service.type | string | `"LoadBalancer"` | | +| controller.shareProcessNamespace | bool | `false` | | +| controller.sysctls | object | `{}` | See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls | +| controller.tcp.annotations | object | `{}` | Annotations to be added to the tcp config configmap | +| controller.tcp.configMapNamespace | string | `""` | Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) | +| controller.terminationGracePeriodSeconds | int | `300` | `terminationGracePeriodSeconds` to avoid killing pods before we are ready # wait up to five minutes for the drain of connections # | +| controller.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # | +| controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. # Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ # | +| controller.udp.annotations | object | `{}` | Annotations to be added to the udp config configmap | +| controller.udp.configMapNamespace | string | `""` | Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) | +| controller.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet # | +| controller.watchIngressWithoutClass | bool | `false` | Process Ingress objects without ingressClass annotation/ingressClassName field Overrides value for --watch-ingress-without-class flag of the controller binary Defaults to false | +| defaultBackend.affinity | object | `{}` | | +| defaultBackend.autoscaling.annotations | object | `{}` | | +| defaultBackend.autoscaling.enabled | bool | `false` | | +| defaultBackend.autoscaling.maxReplicas | int | `2` | | +| defaultBackend.autoscaling.minReplicas | int | `1` | | +| defaultBackend.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| defaultBackend.autoscaling.targetMemoryUtilizationPercentage | int | `50` | | +| defaultBackend.containerSecurityContext | object | `{}` | Security Context policies for controller main container. 
See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # | +| defaultBackend.enabled | bool | `false` | | +| defaultBackend.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| defaultBackend.extraArgs | object | `{}` | | +| defaultBackend.extraEnvs | list | `[]` | Additional environment variables to set for defaultBackend pods | +| defaultBackend.extraVolumeMounts | list | `[]` | | +| defaultBackend.extraVolumes | list | `[]` | | +| defaultBackend.image.allowPrivilegeEscalation | bool | `false` | | +| defaultBackend.image.image | string | `"defaultbackend-amd64"` | | +| defaultBackend.image.pullPolicy | string | `"IfNotPresent"` | | +| defaultBackend.image.readOnlyRootFilesystem | bool | `true` | | +| defaultBackend.image.registry | string | `"registry.k8s.io"` | | +| defaultBackend.image.runAsNonRoot | bool | `true` | | +| defaultBackend.image.runAsUser | int | `65534` | | +| defaultBackend.image.tag | string | `"1.5"` | | +| defaultBackend.labels | object | `{}` | Labels to be added to the default backend resources | +| defaultBackend.livenessProbe.failureThreshold | int | `3` | | +| defaultBackend.livenessProbe.initialDelaySeconds | int | `30` | | +| defaultBackend.livenessProbe.periodSeconds | int | `10` | | +| defaultBackend.livenessProbe.successThreshold | int | `1` | | +| defaultBackend.livenessProbe.timeoutSeconds | int | `5` | | +| defaultBackend.minAvailable | int | `1` | | +| defaultBackend.name | string | `"defaultbackend"` | | +| defaultBackend.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for default backend pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # | +| defaultBackend.podAnnotations | object | `{}` | Annotations to be added to default backend pods # | +| defaultBackend.podLabels | object | `{}` | Labels to add to the pod container metadata | +| defaultBackend.podSecurityContext | object | `{}` | Security Context policies for controller pods See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # | +| defaultBackend.port | int | `8080` | | +| defaultBackend.priorityClassName | string | `""` | | +| defaultBackend.readinessProbe.failureThreshold | int | `6` | | +| defaultBackend.readinessProbe.initialDelaySeconds | int | `0` | | +| defaultBackend.readinessProbe.periodSeconds | int | `5` | | +| defaultBackend.readinessProbe.successThreshold | int | `1` | | +| defaultBackend.readinessProbe.timeoutSeconds | int | `5` | | +| defaultBackend.replicaCount | int | `1` | | +| defaultBackend.resources | object | `{}` | | +| defaultBackend.service.annotations | object | `{}` | | +| defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| defaultBackend.service.loadBalancerSourceRanges | list | `[]` | | +| defaultBackend.service.servicePort | int | `80` | | +| defaultBackend.service.type | string | `"ClusterIP"` | | +| defaultBackend.serviceAccount.automountServiceAccountToken | bool | `true` | | +| defaultBackend.serviceAccount.create | bool | `true` | | +| defaultBackend.serviceAccount.name | string | `""` | | +| defaultBackend.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # | +| dhParam | string | `nil` | A base64-encoded Diffie-Hellman 
parameter. This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` # Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param | +| imagePullSecrets | list | `[]` | Optional array of imagePullSecrets containing private registry credentials # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | +| podSecurityPolicy.enabled | bool | `false` | | +| portNamePrefix | string | `""` | Prefix for TCP and UDP ports names in ingress controller service # Some cloud providers, like Yandex Cloud, may have requirements for a port name regex to support cloud load balancer integration | +| rbac.create | bool | `true` | | +| rbac.scope | bool | `false` | | +| revisionHistoryLimit | int | `10` | Rollback limit # | +| serviceAccount.annotations | object | `{}` | Annotations for the controller service account | +| serviceAccount.automountServiceAccountToken | bool | `true` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.name | string | `""` | | +| tcp | object | `{}` | TCP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # | +| udp | object | `{}` | UDP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # | + diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/README.md.gotmpl b/ansible/roles/kubernetes_install/files/ingress-nginx/README.md.gotmpl new file mode 100644 index 0000000..8959961 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/README.md.gotmpl @@ -0,0 +1,235 @@ +{{ template "chart.header" . }} +[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + +{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }} + +To use, add the `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Chart version 3.x.x: Kubernetes v1.16+ +- Chart version 4.x.x and above: Kubernetes v1.19+ + +## Get Repo Info + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +``` + +## Install Chart + +**Important:** only Helm 3 is supported + +```console +helm install [RELEASE_NAME] ingress-nginx/ingress-nginx +``` + +The command deploys ingress-nginx on the Kubernetes cluster in the default configuration. + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### Upgrading With Zero Downtime in Production + +By default the ingress-nginx controller has service interruptions whenever its pods are restarted or redeployed. 
+In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8).
+
+### Migrating from stable/nginx-ingress
+
+There are two main ways to migrate a release from the `stable/nginx-ingress` chart to the `ingress-nginx/ingress-nginx` chart:
+
+1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one.
+1. For critical services in production that require zero downtime, you will want to:
+    1. [Install](#install-chart) a second Ingress controller.
+    1. Redirect your DNS traffic from the old controller to the new controller.
+    1. Log traffic from both controllers during this changeover.
+    1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it.
+    1. For details on all of these steps, see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production).
+
+Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running the [helm configuration](#configuration) commands on both charts.
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run this configuration command:
+
+```console
+helm show values ingress-nginx/ingress-nginx
+```
+
+### PodDisruptionBudget
+
+Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one;
+otherwise it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info.
+
+### Prometheus Metrics
+
+The Nginx ingress controller can export Prometheus metrics by setting `controller.metrics.enabled` to `true`.
+
+You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`.
+Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation with `controller.metrics.serviceMonitor.enabled` and set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. The `release=prometheus` label should match the label configured in the Prometheus ServiceMonitor (see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`), as shown in the sketch below.
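+For example, a minimal values override for a Prometheus Operator setup might look like the following sketch; the `release: prometheus` label is only an illustration and must match the label your own Prometheus installation selects on:
+
+```yaml
+controller:
+  metrics:
+    enabled: true
+    serviceMonitor:
+      enabled: true
+      additionalLabels:
+        # Assumption: your Prometheus Operator selects ServiceMonitors by this label.
+        release: prometheus
+```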
+### ingress-nginx nginx\_status page/stats server
+
+Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in the nginx ingress controller:
+
+- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed.
+- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 became a Unix socket webserver only available at localhost.
+  You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from the [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the HTTP server.
+
+### ExternalDNS Service Configuration
+
+Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service:
+
+```yaml
+controller:
+  service:
+    annotations:
+      external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com.
+```
+
+### AWS L7 ELB with SSL Termination
+
+Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml):
+
+```yaml
+controller:
+  service:
+    targetPorts:
+      http: http
+      https: http
+    annotations:
+      service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX
+      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http"
+      service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
+      service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
+```
+
+### AWS route53-mapper
+
+To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label:
+
+```yaml
+controller:
+  service:
+    labels:
+      dns: "route53"
+    annotations:
+      domainName: "kubernetes-example.com"
+```
+
+### Additional Internal Load Balancer
+
+This setup is useful when you need both external and internal load balancers but don't want to run multiple ingress controllers and multiple ingress objects per application.
+
+By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL.
+
+You'll need to set both of the following values:
+
+- `controller.service.internal.enabled`
+- `controller.service.internal.annotations`
+
+If either of them is missing, the internal load balancer will not be deployed. For example, if you set `controller.service.internal.enabled=true` but leave the annotations unset, no action will be taken.
+
+`controller.service.internal.annotations` varies with the cloud service you're using.
+
+Example for AWS:
+
+```yaml
+controller:
+  service:
+    internal:
+      enabled: true
+      annotations:
+        # Create internal ELB
+        service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+        # Any other annotation can be declared here.
+```
+
+Example for GCE:
+
+```yaml
+controller:
+  service:
+    internal:
+      enabled: true
+      annotations:
+        # Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
+        # For GKE versions 1.17 and later
+        networking.gke.io/load-balancer-type: "Internal"
+        # For earlier versions
+        # cloud.google.com/load-balancer-type: "Internal"
+
+        # Any other annotation can be declared here.
+```
+
+Example for Azure:
+
+```yaml
+controller:
+  service:
+    annotations:
+      # Create internal LB
+      service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+      # Any other annotation can be declared here.
+```
+
+Example for Oracle Cloud Infrastructure:
+
+```yaml
+controller:
+  service:
+    annotations:
+      # Create internal LB
+      service.beta.kubernetes.io/oci-load-balancer-internal: "true"
+      # Any other annotation can be declared here.
+```
+
+A use case for this scenario is a split-view DNS setup, where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one Kubernetes Ingress object.
+
+Optionally, you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`.
+
+### Ingress Admission Webhooks
+
+With nginx-ingress-controller version 0.25+, the controller pod exposes an endpoint that integrates with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingresses from being added to the cluster.
+**This feature is enabled by default since 0.31.0.**
+
+nginx-ingress-controller 0.25.* works only with Kubernetes 1.14+; 0.26 fixes [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521). The webhook can be toggled through the chart values, as sketched below.
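+For example, to turn the validating webhook off, a values override like the following sketch can be used (the same toggle appears in several of this chart's ci/ values files):
+
+```yaml
+controller:
+  admissionWebhooks:
+    # Disable the validating webhook and its certificate jobs.
+    enabled: false
+```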
+
+### Helm Error When Upgrading: spec.clusterIP: Invalid value: ""
+
+If you are upgrading this chart from a version between 0.31.0 and 1.2.2, you may get an error like this:
+
+```console
+Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable
+```
+
+Details of how and why are in [this issue](https://github.com/helm/charts/pull/13646), but to resolve this you can set `xxxx.service.omitClusterIP` to `true`, where `xxxx` is the service referenced in the error.
+
+As of version `1.26.0` of this chart, `clusterIP: ""` is no longer rendered when no clusterIP value is provided, so the `invalid: spec.clusterIP: Invalid value: "": field is immutable` error no longer occurs.
+
+{{ template "chart.requirementsSection" . }}
+
+{{ template "chart.valuesSection" . }}
+
+{{ template "helm-docs.versionFooter" . }}
diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml new file mode 100644 index 0000000..b28a232 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml @@ -0,0 +1,7 @@ +controller: + watchIngressWithoutClass: true + ingressClassResource: + name: custom-nginx + enabled: true + default: true + controllerValue: "k8s.io/custom-nginx" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customconfig-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customconfig-values.yaml new file mode 100644 index 0000000..4393a5b --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customconfig-values.yaml @@ -0,0 +1,14 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + kind: DaemonSet + allowSnippetAnnotations: false + admissionWebhooks: + enabled: false + service: + type: ClusterIP + + config: + use-proxy-protocol: "true" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml new file mode 100644 index 0000000..1d94be2 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml @@ -0,0 +1,22 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-extra-modules.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-extra-modules.yaml new file mode 100644 index 0000000..f299dbf --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-extra-modules.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + service: + type: ClusterIP + extraModules: + - name: opentelemetry + image: busybox diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-headers-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-headers-values.yaml new file mode 100644 index 0000000..ab7d47b --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-headers-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml new file mode 100644 index 0000000..0a200a7 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false +
service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "true" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-nodeport-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-nodeport-values.yaml new file mode 100644 index 0000000..3b7aa2f --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-nodeport-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-podannotations-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-podannotations-values.yaml new file mode 100644 index 0000000..0b55306 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-podannotations-values.yaml @@ -0,0 +1,17 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP + podAnnotations: + prometheus.io/path: /metrics + prometheus.io/port: "10254" + prometheus.io/scheme: http + prometheus.io/scrape: "true" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..acd86a7 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,20 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml new file mode 100644 index 0000000..90b0f57 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml @@ -0,0 +1,18 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" + +portNamePrefix: "port" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml new file mode 100644 index 0000000..25ee64d --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml @@ -0,0 +1,16 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-values.yaml 
b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-values.yaml new file mode 100644 index 0000000..380c8b4 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-default-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-default-values.yaml new file mode 100644 index 0000000..82fa23e --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-default-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-metrics-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-metrics-values.yaml new file mode 100644 index 0000000..cb3cb54 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-metrics-values.yaml @@ -0,0 +1,12 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-psp-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-psp-values.yaml new file mode 100644 index 0000000..8026a63 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-psp-values.yaml @@ -0,0 +1,13 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml new file mode 100644 index 0000000..fccdb13 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml @@ -0,0 +1,13 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-values.yaml new file mode 100644 index 0000000..54d364d --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml new file mode 100644 index 0000000..dca3f35 --- 
/dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml @@ -0,0 +1,14 @@ +controller: + autoscaling: + enabled: true + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Pods + value: 1 + periodSeconds: 180 + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-values.yaml new file mode 100644 index 0000000..b8b3ac6 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + autoscaling: + enabled: true + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customconfig-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customconfig-values.yaml new file mode 100644 index 0000000..1749418 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customconfig-values.yaml @@ -0,0 +1,12 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + config: + use-proxy-protocol: "true" + allowSnippetAnnotations: false + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customnodeport-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customnodeport-values.yaml new file mode 100644 index 0000000..a564eaf --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customnodeport-values.yaml @@ -0,0 +1,20 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-default-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-default-values.yaml new file mode 100644 index 0000000..9f46b4e --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-default-values.yaml @@ -0,0 +1,8 @@ +# Left blank to test default values +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-extra-modules.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-extra-modules.yaml new file mode 100644 index 0000000..ec59235 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-extra-modules.yaml @@ -0,0 +1,10 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + extraModules: + - name: opentelemetry + image: busybox diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-headers-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-headers-values.yaml new file mode 100644 index 0000000..17a11ac --- /dev/null +++ 
b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-headers-values.yaml @@ -0,0 +1,13 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-internal-lb-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-internal-lb-values.yaml new file mode 100644 index 0000000..fd8df8d --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-internal-lb-values.yaml @@ -0,0 +1,13 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "true" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-metrics-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-metrics-values.yaml new file mode 100644 index 0000000..9209ad5 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-metrics-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-nodeport-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-nodeport-values.yaml new file mode 100644 index 0000000..cd9b323 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-nodeport-values.yaml @@ -0,0 +1,9 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-podannotations-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-podannotations-values.yaml new file mode 100644 index 0000000..b48d93c --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-podannotations-values.yaml @@ -0,0 +1,16 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP + podAnnotations: + prometheus.io/path: /metrics + prometheus.io/port: "10254" + prometheus.io/scheme: http + prometheus.io/scrape: "true" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-psp-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-psp-values.yaml new file mode 100644 index 0000000..2f332a7 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-psp-values.yaml @@ -0,0 +1,10 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 
0000000..c51a4e9 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,19 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml new file mode 100644 index 0000000..56323c5 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml @@ -0,0 +1,17 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" + +portNamePrefix: "port" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml new file mode 100644 index 0000000..5b45b69 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml @@ -0,0 +1,15 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-values.yaml new file mode 100644 index 0000000..ac0b6e6 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml new file mode 100644 index 0000000..6195bb3 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml @@ -0,0 +1,12 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml new file mode 100644 index 0000000..95487b0 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml @@ -0,0 +1,12 @@ +controller: + service: + type: ClusterIP + admissionWebhooks: + enabled: true + extraEnvs: + - name: FOO + value: foo + - name: TEST + value: test + patch: + enabled: true diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml 
b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml new file mode 100644 index 0000000..49ebbb0 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml @@ -0,0 +1,23 @@ +controller: + service: + type: ClusterIP + admissionWebhooks: + enabled: true + createSecretJob: + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + patchWebhookJob: + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + patch: + enabled: true diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-values.yaml new file mode 100644 index 0000000..76669a5 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-values.yaml @@ -0,0 +1,9 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/override-values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/override-values.yaml new file mode 100644 index 0000000..e190f03 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/override-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + + service: + type: LoadBalancer + nodePorts: + http: "30000" + https: "30001" + tcp: {} + udp: {} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/temp.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/temp.yaml new file mode 100644 index 0000000..2b28787 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/temp.yaml @@ -0,0 +1,724 @@ +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +automountServiceAccountToken: true +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +data: + allow-snippet-annotations: "true" +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + 
verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" 
+--- +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller-admission + namespace: default +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + type: LoadBalancer + ipFamilyPolicy: SingleStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + - name: https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + replicas: 1 + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: "registry.k8s.io/ingress-nginx/controller:v1.3.1@sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + 
livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: release-name-ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: release-name-ingress-nginx-admission +--- +# Source: ingress-nginx/templates/controller-ingressclass.yaml +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: release-name-ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: "default" + name: release-name-ingress-nginx-controller-admission + path: /networking/v1/ingresses +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + 
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-create + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + 
app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - create + - --host=release-name-ingress-nginx-controller-admission,release-name-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=release-name-ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-patch + namespace: default + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-patch + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - patch + - --webhook-name=release-name-ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=release-name-ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/temp2.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/temp2.yaml new file mode 100644 index 0000000..9ef52fc --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/temp2.yaml @@ -0,0 +1,725 @@ +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + 
app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +automountServiceAccountToken: true +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +data: + allow-snippet-annotations: "true" +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - 
networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller-admission + namespace: default +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + type: LoadBalancer + ipFamilyPolicy: SingleStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + nodePort: 30000 + - name: https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + nodePort: 30001 + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: 
release-name-ingress-nginx-controller + namespace: default +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: "registry.k8s.io/ingress-nginx/controller:v1.3.1@sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: release-name-ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: release-name-ingress-nginx-admission +--- +# Source: ingress-nginx/templates/controller-ingressclass.yaml +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + 
app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: release-name-ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: "default" + name: release-name-ingress-nginx-controller-admission + path: /networking/v1/ingresses +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-create + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - create + - --host=release-name-ingress-nginx-controller-admission,release-name-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=release-name-ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-patch + namespace: default + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-patch + labels: + helm.sh/chart: 
ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - patch + - --webhook-name=release-name-ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=release-name-ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/NOTES.txt b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/NOTES.txt new file mode 100644 index 0000000..8985c56 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/NOTES.txt @@ -0,0 +1,80 @@ +The ingress-nginx controller has been installed. + +{{- if contains "NodePort" .Values.controller.service.type }} +Get the application URL by running these commands: + +{{- if (not (empty .Values.controller.service.nodePorts.http)) }} + export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }} +{{- else }} + export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) +{{- end }} +{{- if (not (empty .Values.controller.service.nodePorts.https)) }} + export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }} +{{- else }} + export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) +{{- end }} + export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}") + + echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." + echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." +{{- else if contains "LoadBalancer" .Values.controller.service.type }} +It may take a few minutes for the LoadBalancer IP to be available. +You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ include "ingress-nginx.controller.fullname" . }}' +{{- else if contains "ClusterIP" .Values.controller.service.type }} +Get the application URL by running these commands: + export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "ingress-nginx.name" . }},component={{ .Values.controller.name }},release={{ .Release.Name }}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 + echo "Visit http://127.0.0.1:8080 to access your application." 
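These NOTES branch on `.Values.controller.service.type`. The manifests rendered earlier in this patch were produced with the controller running as a DaemonSet behind a LoadBalancer Service with node ports pinned to 30000/30001; a values override along the following lines would reproduce that rendering (a sketch for orientation only — the actual override file is not part of this patch):

  controller:
    kind: DaemonSet        # render the DaemonSet variant of the controller
    service:
      type: LoadBalancer   # selects the LoadBalancer branch of these NOTES
      nodePorts:
        http: 30000        # fixed node port for HTTP, matching the rendered Service
        https: 30001       # fixed node port for HTTPS, matching the rendered Service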
+{{- end }} + +An example Ingress that makes use of the controller: + +{{- $isV1 := semverCompare ">=1" .Chart.AppVersion}} + apiVersion: networking.k8s.io/v1 + kind: Ingress + metadata: + name: example + namespace: foo + {{- if eq $isV1 false }} + annotations: + kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }} + {{- end }} + spec: + {{- if $isV1 }} + ingressClassName: {{ .Values.controller.ingressClassResource.name }} + {{- end }} + rules: + - host: www.example.com + http: + paths: + - pathType: Prefix + backend: + service: + name: exampleService + port: + number: 80 + path: / + # This section is only required if TLS is to be enabled for the Ingress + tls: + - hosts: + - www.example.com + secretName: example-tls + +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: + + apiVersion: v1 + kind: Secret + metadata: + name: example-tls + namespace: foo + data: + tls.crt: <base64 encoded cert> + tls.key: <base64 encoded key> + type: kubernetes.io/tls + +{{- if .Values.controller.headers }} +################################################################################# +###### WARNING: `controller.headers` has been deprecated! ##### +###### It has been renamed to `controller.proxySetHeaders`. ##### +################################################################################# +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/_helpers.tpl b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/_helpers.tpl new file mode 100644 index 0000000..e69de0c --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/_helpers.tpl @@ -0,0 +1,185 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "ingress-nginx.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ingress-nginx.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Container SecurityContext.
+*/}} +{{- define "controller.containerSecurityContext" -}} +{{- if .Values.controller.containerSecurityContext -}} +{{- toYaml .Values.controller.containerSecurityContext -}} +{{- else -}} +capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + {{- if .Values.controller.image.chroot }} + - SYS_CHROOT + {{- end }} +runAsUser: {{ .Values.controller.image.runAsUser }} +allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} +{{- end }} +{{- end -}} + +{{/* +Get specific image +*/}} +{{- define "ingress-nginx.image" -}} +{{- if .chroot -}} +{{- printf "%s-chroot" .image -}} +{{- else -}} +{{- printf "%s" .image -}} +{{- end }} +{{- end -}} + +{{/* +Get specific image digest +*/}} +{{- define "ingress-nginx.imageDigest" -}} +{{- if .chroot -}} +{{- if .digestChroot -}} +{{- printf "@%s" .digestChroot -}} +{{- end }} +{{- else -}} +{{ if .digest -}} +{{- printf "@%s" .digest -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified controller name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.controller.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. + +By convention this will simply use the <namespace>/<controller-name> to match the name of the +service generated. + +Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride` + +*/}} +{{- define "ingress-nginx.controller.publishServicePath" -}} +{{- $defServiceName := printf "%s/%s" "$(POD_NAMESPACE)" (include "ingress-nginx.controller.fullname" .) -}} +{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "ingress-nginx.labels" -}} +helm.sh/chart: {{ include "ingress-nginx.chart" . }} +{{ include "ingress-nginx.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/part-of: {{ template "ingress-nginx.name" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.commonLabels}} +{{ toYaml .Values.commonLabels }} +{{- end }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "ingress-nginx.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ingress-nginx.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the controller service account to use +*/}} +{{- define "ingress-nginx.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "ingress-nginx.fullname" .)
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "ingress-nginx.defaultBackend.serviceAccountName" -}} +{{- if .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-backend" (include "ingress-nginx.fullname" .)) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Check the ingress controller version tag is at most three versions behind the last release +*/}} +{{- define "isControllerTagValid" -}} +{{- if not (semverCompare ">=0.27.0-0" .Values.controller.image.tag) -}} +{{- fail "Controller container image tag should be 0.27.0 or higher" -}} +{{- end -}} +{{- end -}} + +{{/* +IngressClass parameters. +*/}} +{{- define "ingressClass.parameters" -}} + {{- if .Values.controller.ingressClassResource.parameters -}} + parameters: +{{ toYaml .Values.controller.ingressClassResource.parameters | indent 4}} + {{ end }} +{{- end -}} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/_params.tpl b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/_params.tpl new file mode 100644 index 0000000..305ce0d --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/_params.tpl @@ -0,0 +1,62 @@ +{{- define "ingress-nginx.params" -}} +- /nginx-ingress-controller +{{- if .Values.defaultBackend.enabled }} +- --default-backend-service=$(POD_NAMESPACE)/{{ include "ingress-nginx.defaultBackend.fullname" . }} +{{- end }} +{{- if and .Values.controller.publishService.enabled .Values.controller.service.enabled }} +{{- if .Values.controller.service.external.enabled }} +- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }} +{{- else if .Values.controller.service.internal.enabled }} +- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }}-internal +{{- end }} +{{- end }} +- --election-id={{ .Values.controller.electionID }} +- --controller-class={{ .Values.controller.ingressClassResource.controllerValue }} +{{- if .Values.controller.ingressClass }} +- --ingress-class={{ .Values.controller.ingressClass }} +{{- end }} +- --configmap={{ default "$(POD_NAMESPACE)" .Values.controller.configMapNamespace }}/{{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.tcp }} +- --tcp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.tcp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-tcp +{{- end }} +{{- if .Values.udp }} +- --udp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.udp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . 
}}-udp +{{- end }} +{{- if .Values.controller.scope.enabled }} +- --watch-namespace={{ default "$(POD_NAMESPACE)" .Values.controller.scope.namespace }} +{{- end }} +{{- if and (not .Values.controller.scope.enabled) .Values.controller.scope.namespaceSelector }} +- --watch-namespace-selector={{ default "" .Values.controller.scope.namespaceSelector }} +{{- end }} +{{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }} +- --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} +- --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} +- --validating-webhook-certificate={{ .Values.controller.admissionWebhooks.certificate }} +- --validating-webhook-key={{ .Values.controller.admissionWebhooks.key }} +{{- end }} +{{- if .Values.controller.maxmindLicenseKey }} +- --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} +{{- end }} +{{- if .Values.controller.healthCheckHost }} +- --healthz-host={{ .Values.controller.healthCheckHost }} +{{- end }} +{{- if not (eq .Values.controller.healthCheckPath "/healthz") }} +- --health-check-path={{ .Values.controller.healthCheckPath }} +{{- end }} +{{- if .Values.controller.ingressClassByName }} +- --ingress-class-by-name=true +{{- end }} +{{- if .Values.controller.watchIngressWithoutClass }} +- --watch-ingress-without-class=true +{{- end }} +{{- range $key, $value := .Values.controller.extraArgs }} +{{- /* Accept keys without values or with false as value */}} +{{- if eq ($value | quote | len) 2 }} +- --{{ $key }} +{{- else }} +- --{{ $key }}={{ $value }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml new file mode 100644 index 0000000..5659a1f --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml @@ -0,0 +1,34 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + {{- with .Values.controller.admissionWebhooks.existingPsp }} + - {{ . }} + {{- else }} + - {{ include "ingress-nginx.fullname" . 
}}-admission + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml new file mode 100644 index 0000000..abf17fb --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml new file mode 100644 index 0000000..7558e0b --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.controller.admissionWebhooks.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: create + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - create + - --host={{ include "ingress-nginx.controller.fullname" . }}-admission,{{ include "ingress-nginx.controller.fullname" . }}-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name={{ include "ingress-nginx.fullname" . }}-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.admissionWebhooks.extraEnvs }} + {{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + {{- if .Values.controller.admissionWebhooks.createSecretJob.resources }} + resources: {{ toYaml .Values.controller.admissionWebhooks.createSecretJob.resources | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.securityContext }} + securityContext: + {{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml new file mode 100644 index 0000000..0528215 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -0,0 +1,81 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-patch + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.controller.admissionWebhooks.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . 
}}-admission-patch + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: patch + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - patch + - --webhook-name={{ include "ingress-nginx.fullname" . }}-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name={{ include "ingress-nginx.fullname" . }}-admission + - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.admissionWebhooks.extraEnvs }} + {{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + {{- if .Values.controller.admissionWebhooks.patchWebhookJob.resources }} + resources: {{ toYaml .Values.controller.admissionWebhooks.patchWebhookJob.resources | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.securityContext }} + securityContext: + {{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml new file mode 100644 index 0000000..70edde3 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled (empty .Values.controller.admissionWebhooks.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml new file mode 100644 index 0000000..795bac6 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml new file mode 100644 index 0000000..698c5c8 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . 
}}-admission + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml new file mode 100644 index 0000000..eae4751 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml new file mode 100644 index 0000000..8caffcb --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml @@ -0,0 +1,48 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + {{- if .Values.controller.admissionWebhooks.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.annotations | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: {{ .Values.controller.admissionWebhooks.failurePolicy | default "Fail" }} + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: {{ .Release.Namespace | quote }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-admission + path: /networking/v1/ingresses + {{- if .Values.controller.admissionWebhooks.timeoutSeconds }} + timeoutSeconds: {{ .Values.controller.admissionWebhooks.timeoutSeconds }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.controller.admissionWebhooks.namespaceSelector | nindent 6 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.objectSelector }} + objectSelector: {{ toYaml .Values.controller.admissionWebhooks.objectSelector | nindent 6 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/clusterrole.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/clusterrole.yaml new file mode 100644 index 0000000..0e725ec --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/clusterrole.yaml @@ -0,0 +1,94 @@ +{{- if .Values.rbac.create }} + +{{- if and .Values.rbac.scope (not .Values.controller.scope.enabled) -}} + {{ required "Invalid configuration: 'rbac.scope' should be equal to 'controller.scope.enabled' (true/false)." (index (dict) ".") }} +{{- end }} + +{{- if not .Values.rbac.scope -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets +{{- if not .Values.controller.scope.enabled }} + - namespaces +{{- end}} + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch +{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }} + - apiGroups: + - "" + resources: + - namespaces + resourceNames: + - "{{ .Values.controller.scope.namespace }}" + verbs: + - get +{{- end }} + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +{{- end }} + +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/clusterrolebinding.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..acbbd8b --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.rbac.create (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-addheaders.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-addheaders.yaml new file mode 100644 index 0000000..dfd49a1 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-addheaders.yaml @@ -0,0 +1,14 @@ +{{- if .Values.controller.addHeaders -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-custom-add-headers + namespace: {{ .Release.Namespace }} +data: {{ toYaml .Values.controller.addHeaders | nindent 2 }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml new file mode 100644 index 0000000..f8d15fa --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml @@ -0,0 +1,19 @@ +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-custom-proxy-headers + namespace: {{ .Release.Namespace }} +data: +{{- if .Values.controller.proxySetHeaders }} +{{ toYaml .Values.controller.proxySetHeaders | indent 2 }} +{{ else if and .Values.controller.headers (not .Values.controller.proxySetHeaders) }} +{{ toYaml .Values.controller.headers | indent 2 }} +{{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-tcp.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-tcp.yaml new file mode 100644 index 0000000..0f6088e --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-tcp.yaml @@ -0,0 +1,17 @@ +{{- if .Values.tcp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.tcp.annotations }} + annotations: {{ toYaml .Values.controller.tcp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-tcp + namespace: {{ .Release.Namespace }} +data: {{ tpl (toYaml .Values.tcp) . | nindent 2 }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-udp.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-udp.yaml new file mode 100644 index 0000000..3772ec5 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-udp.yaml @@ -0,0 +1,17 @@ +{{- if .Values.udp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- if .Values.controller.udp.annotations }} + annotations: {{ toYaml .Values.controller.udp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-udp + namespace: {{ .Release.Namespace }} +data: {{ tpl (toYaml .Values.udp) . | nindent 2 }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap.yaml new file mode 100644 index 0000000..f28b26e --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.configAnnotations }} + annotations: {{ toYaml .Values.controller.configAnnotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +data: + allow-snippet-annotations: "{{ .Values.controller.allowSnippetAnnotations }}" +{{- if .Values.controller.addHeaders }} + add-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers +{{- end }} +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} + proxy-set-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-proxy-headers +{{- end }} +{{- if .Values.dhParam }} + ssl-dh-param: {{ printf "%s/%s" .Release.Namespace (include "ingress-nginx.controller.fullname" .) }} +{{- end }} +{{- range $key, $value := .Values.controller.config }} + {{- $key | nindent 2 }}: {{ $value | quote }} +{{- end }} + diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-daemonset.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-daemonset.yaml new file mode 100644 index 0000000..80c268f --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-daemonset.yaml @@ -0,0 +1,223 @@ +{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + updateStrategy: {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . 
| nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: http-metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + + + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .Name }} + image: {{ .Image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} 
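The placement fields in this block (`nodeSelector` above, `tolerations` and `affinity` below) are passed through from values verbatim via `toYaml`. A minimal sketch of a corresponding override — the `node-role.kubernetes.io/ingress` label and taint here are hypothetical, chosen only to illustrate the shape:

  controller:
    nodeSelector:
      kubernetes.io/os: linux                  # chart default, repeated for clarity
      node-role.kubernetes.io/ingress: "true"  # hypothetical label on dedicated ingress nodes
    tolerations:
      - key: node-role.kubernetes.io/ingress   # hypothetical taint matching the label above
        operator: Exists
        effect: NoSchedule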
+ {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + volumes: + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-deployment.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-deployment.yaml new file mode 100644 index 0000000..5ad1867 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-deployment.yaml @@ -0,0 +1,228 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + {{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + strategy: + {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName | quote }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . 
| nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: http-metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .name }} + image: {{ .image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + volumeMounts: + - name: modules + mountPath: /modules_mount + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: 
{{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + volumes: + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-hpa.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-hpa.yaml new file mode 100644 index 0000000..e0979f1 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-hpa.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.controller.autoscaling.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} +{{- if not .Values.controller.keda.enabled }} + +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + annotations: + {{- with .Values.controller.autoscaling.annotations }} + {{- toYaml . | trimSuffix "\n" | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "ingress-nginx.controller.fullname" . }} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: + {{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscalingTemplate }} + {{- toYaml . | nindent 2 }} + {{- end }} + {{- with .Values.controller.autoscaling.behavior }} + behavior: + {{- toYaml . 
| nindent 4 }}
+  {{- end }}
+{{- end }}
+{{- end }}
+
diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-ingressclass.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-ingressclass.yaml
new file mode 100644
index 0000000..9492784
--- /dev/null
+++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-ingressclass.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.controller.ingressClassResource.enabled -}}
+# We don't support namespaced ingressClass yet
+# So a ClusterRole and a ClusterRoleBinding are required
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+    {{- with .Values.controller.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+  name: {{ .Values.controller.ingressClassResource.name }}
+{{- if .Values.controller.ingressClassResource.default }}
+  annotations:
+    ingressclass.kubernetes.io/is-default-class: "true"
+{{- end }}
+spec:
+  controller: {{ .Values.controller.ingressClassResource.controllerValue }}
+  {{ template "ingressClass.parameters" . }}
+{{- end }}
diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-keda.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-keda.yaml
new file mode 100644
index 0000000..875157e
--- /dev/null
+++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-keda.yaml
@@ -0,0 +1,42 @@
+{{- if and .Values.controller.keda.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}}
+# https://keda.sh/docs/
+
+apiVersion: {{ .Values.controller.keda.apiVersion }}
+kind: ScaledObject
+metadata:
+  labels:
+    {{- include "ingress-nginx.labels" . | nindent 4 }}
+    app.kubernetes.io/component: controller
+    {{- with .Values.controller.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+  name: {{ include "ingress-nginx.controller.fullname" . }}
+  {{- if .Values.controller.keda.scaledObject.annotations }}
+  annotations: {{ toYaml .Values.controller.keda.scaledObject.annotations | nindent 4 }}
+  {{- end }}
+spec:
+  scaleTargetRef:
+{{- if eq .Values.controller.keda.apiVersion "keda.k8s.io/v1alpha1" }}
+    deploymentName: {{ include "ingress-nginx.controller.fullname" . }}
+{{- else if eq .Values.controller.keda.apiVersion "keda.sh/v1alpha1" }}
+    name: {{ include "ingress-nginx.controller.fullname" . }}
+{{- end }}
+  pollingInterval: {{ .Values.controller.keda.pollingInterval }}
+  cooldownPeriod: {{ .Values.controller.keda.cooldownPeriod }}
+  minReplicaCount: {{ .Values.controller.keda.minReplicas }}
+  maxReplicaCount: {{ .Values.controller.keda.maxReplicas }}
+  triggers:
+{{- with .Values.controller.keda.triggers }}
+{{ toYaml . | indent 2 }}
+{{ end }}
+  advanced:
+    restoreToOriginalReplicaCount: {{ .Values.controller.keda.restoreToOriginalReplicaCount }}
+{{- if .Values.controller.keda.behavior }}
+    horizontalPodAutoscalerConfig:
+      behavior:
+{{ with .Values.controller.keda.behavior -}}
+{{ toYaml . 
| indent 8 }} +{{ end }} + +{{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml new file mode 100644 index 0000000..8dfbe98 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,19 @@ +{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (and (not .Values.controller.autoscaling.enabled) (gt (.Values.controller.replicaCount | int) 1)) }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + minAvailable: {{ .Values.controller.minAvailable }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-prometheusrules.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-prometheusrules.yaml new file mode 100644 index 0000000..78b5362 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-prometheusrules.yaml @@ -0,0 +1,21 @@ +{{- if and ( .Values.controller.metrics.enabled ) ( .Values.controller.metrics.prometheusRule.enabled ) ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) -}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.prometheusRule.namespace }} + namespace: {{ .Values.controller.metrics.prometheusRule.namespace | quote }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.prometheusRule.additionalLabels }} + {{- toYaml .Values.controller.metrics.prometheusRule.additionalLabels | nindent 4 }} + {{- end }} +spec: +{{- if .Values.controller.metrics.prometheusRule.rules }} + groups: + - name: {{ template "ingress-nginx.name" . }} + rules: {{- toYaml .Values.controller.metrics.prometheusRule.rules | nindent 4 }} +{{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-psp.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-psp.yaml new file mode 100644 index 0000000..2e0499c --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-psp.yaml @@ -0,0 +1,94 @@ +{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }} +{{- if and .Values.podSecurityPolicy.enabled (empty .Values.controller.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + allowedCapabilities: + - NET_BIND_SERVICE + {{- if .Values.controller.image.chroot }} + - SYS_CHROOT + {{- end }} +{{- if .Values.controller.sysctls }} + allowedUnsafeSysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - {{ $sysctl }} + {{- end }} +{{- end }} + privileged: false + allowPrivilegeEscalation: true + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + #- 'projected' + - 'secret' + #- 'downwardAPI' +{{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} +{{- end }} +{{- if or .Values.controller.hostNetwork .Values.controller.hostPort.enabled }} + hostPorts: +{{- if .Values.controller.hostNetwork }} +{{- range $key, $value := .Values.controller.containerPort }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- else if .Values.controller.hostPort.enabled }} +{{- range $key, $value := .Values.controller.hostPort.ports }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- end }} +{{- if .Values.controller.metrics.enabled }} + # metrics + - min: {{ .Values.controller.metrics.port }} + max: {{ .Values.controller.metrics.port }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + # admission webhooks + - min: {{ .Values.controller.admissionWebhooks.port }} + max: {{ .Values.controller.admissionWebhooks.port }} +{{- end }} +{{- range $key, $value := .Values.tcp }} + # {{ $key }}-tcp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- range $key, $value := .Values.udp }} + # {{ $key }}-udp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + seLinux: + rule: 'RunAsAny' +{{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-role.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-role.yaml new file mode 100644 index 0000000..330be8c --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-role.yaml @@ -0,0 +1,113 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . 
}} + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - {{ .Values.controller.electionID }} + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - {{ .Values.controller.electionID }} + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + {{- with .Values.controller.existingPsp }} + resourceNames: [{{ . }}] + {{- else }} + resourceNames: [{{ include "ingress-nginx.fullname" . }}] + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-rolebinding.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-rolebinding.yaml new file mode 100644 index 0000000..e846a11 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-internal.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-internal.yaml new file mode 100644 index 0000000..aae3e15 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-internal.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.internal.enabled .Values.controller.service.internal.annotations}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.internal.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }}-internal + namespace: {{ .Release.Namespace }} +spec: + type: "{{ .Values.controller.service.type }}" +{{- if .Values.controller.service.internal.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.internal.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.internal.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.internal.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.internal.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.internal.externalTrafficPolicy }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: http + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: https + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + {{- if $.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-metrics.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-metrics.yaml new file mode 100644 index 0000000..1c1d5bd --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-metrics.yaml @@ -0,0 +1,45 @@ +{{- if .Values.controller.metrics.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.metrics.service.annotations }} + annotations: {{ toYaml .Values.controller.metrics.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.service.labels }} + {{- toYaml .Values.controller.metrics.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }}-metrics + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.metrics.service.type }} +{{- if .Values.controller.metrics.service.clusterIP }} + clusterIP: {{ .Values.controller.metrics.service.clusterIP }} +{{- end }} +{{- if .Values.controller.metrics.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.metrics.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.metrics.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.metrics.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.metrics.service.externalTrafficPolicy }} +{{- end }} + ports: + - name: http-metrics + port: {{ .Values.controller.metrics.service.servicePort }} + protocol: TCP + targetPort: http-metrics + {{- $setNodePorts := (or (eq .Values.controller.metrics.service.type "NodePort") (eq .Values.controller.metrics.service.type "LoadBalancer")) }} + {{- if (and $setNodePorts (not (empty .Values.controller.metrics.service.nodePort))) }} + nodePort: {{ .Values.controller.metrics.service.nodePort }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-webhook.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-webhook.yaml new file mode 100644 index 0000000..2aae24f --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-webhook.yaml @@ -0,0 +1,40 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.admissionWebhooks.service.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-admission + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.admissionWebhooks.service.type }} +{{- if .Values.controller.admissionWebhooks.service.clusterIP }} + clusterIP: {{ .Values.controller.admissionWebhooks.service.clusterIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.admissionWebhooks.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.admissionWebhooks.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: https-webhook + port: 443 + targetPort: webhook + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: https + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service.yaml new file mode 100644 index 0000000..2b28196 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-service.yaml @@ -0,0 +1,101 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.external.enabled -}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.service.type }} +{{- if .Values.controller.service.clusterIP }} + clusterIP: {{ .Values.controller.service.clusterIP }} +{{- end }} +{{- if .Values.controller.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }} +{{- end }} +{{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: {{ .Values.controller.service.sessionAffinity }} +{{- end }} +{{- if .Values.controller.service.healthCheckNodePort }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} +{{- end }} +{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}} +{{- if .Values.controller.service.ipFamilyPolicy }} + ipFamilyPolicy: {{ .Values.controller.service.ipFamilyPolicy }} +{{- end }} +{{- end }} +{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}} +{{- if .Values.controller.service.ipFamilies }} + ipFamilies: {{ toYaml .Values.controller.service.ipFamilies | nindent 4 }} +{{- end }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }} + appProtocol: http + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }} + appProtocol: https + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + {{- if $.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + 
{{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-serviceaccount.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-serviceaccount.yaml new file mode 100644 index 0000000..824b2a1 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- if or .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.serviceAccount.annotations }} + annotations: + {{ toYaml .Values.serviceAccount.annotations | indent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-servicemonitor.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-servicemonitor.yaml new file mode 100644 index 0000000..973d36b --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.controller.metrics.serviceMonitor.namespace | quote }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }} + {{- if .Values.controller.metrics.serviceMonitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.relabelings }} + relabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} +{{- if .Values.controller.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.controller.metrics.serviceMonitor.jobLabel | quote }} +{{- end }} +{{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | nindent 4 }} +{{- else }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} +{{- if .Values.controller.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- range .Values.controller.metrics.serviceMonitor.targetLabels }} + - {{ . 
}} + {{- end }} +{{- end }} + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml new file mode 100644 index 0000000..f74c2fb --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml @@ -0,0 +1,19 @@ +{{- if .Values.controller.admissionWebhooks.enabled }} +{{- if .Values.controller.admissionWebhooks.networkPolicyEnabled }} + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-webhooks-allow + namespace: {{ .Release.Namespace }} +spec: + ingress: + - {} + podSelector: + matchLabels: + app.kubernetes.io/name: {{ include "ingress-nginx.name" . }} + policyTypes: + - Ingress + +{{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-deployment.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-deployment.yaml new file mode 100644 index 0000000..fd3e96e --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-deployment.yaml @@ -0,0 +1,118 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: default-backend +{{- if not .Values.defaultBackend.autoscaling.enabled }} + replicas: {{ .Values.defaultBackend.replicaCount }} +{{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + {{- if .Values.defaultBackend.podAnnotations }} + annotations: {{ toYaml .Values.defaultBackend.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.podLabels }} + {{- toYaml .Values.defaultBackend.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.priorityClassName }} + priorityClassName: {{ .Values.defaultBackend.priorityClassName }} + {{- end }} + {{- if .Values.defaultBackend.podSecurityContext }} + securityContext: {{ toYaml .Values.defaultBackend.podSecurityContext | nindent 8 }} + {{- end }} + containers: + - name: {{ template "ingress-nginx.name" . 
}}-default-backend + {{- with .Values.defaultBackend.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }} + {{- if .Values.defaultBackend.extraArgs }} + args: + {{- range $key, $value := .Values.defaultBackend.extraArgs }} + {{- /* Accept keys without values or with false as value */}} + {{- if eq ($value | quote | len) 2 }} + - --{{ $key }} + {{- else }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + {{- end }} + securityContext: + capabilities: + drop: + - ALL + runAsUser: {{ .Values.defaultBackend.image.runAsUser }} + runAsNonRoot: {{ .Values.defaultBackend.image.runAsNonRoot }} + allowPrivilegeEscalation: {{ .Values.defaultBackend.image.allowPrivilegeEscalation }} + readOnlyRootFilesystem: {{ .Values.defaultBackend.image.readOnlyRootFilesystem}} + {{- if .Values.defaultBackend.extraEnvs }} + env: {{ toYaml .Values.defaultBackend.extraEnvs | nindent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.port }} + protocol: TCP + {{- if .Values.defaultBackend.extraVolumeMounts }} + volumeMounts: {{- toYaml .Values.defaultBackend.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.defaultBackend.resources }} + resources: {{ toYaml .Values.defaultBackend.resources | nindent 12 }} + {{- end }} + {{- if .Values.defaultBackend.nodeSelector }} + nodeSelector: {{ toYaml .Values.defaultBackend.nodeSelector | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . 
}} + {{- if .Values.defaultBackend.tolerations }} + tolerations: {{ toYaml .Values.defaultBackend.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.affinity }} + affinity: {{ toYaml .Values.defaultBackend.affinity | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: 60 + {{- if .Values.defaultBackend.extraVolumes }} + volumes: {{ toYaml .Values.defaultBackend.extraVolumes | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-hpa.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-hpa.yaml new file mode 100644 index 0000000..594d265 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-hpa.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "ingress-nginx.defaultBackend.fullname" . }} + minReplicas: {{ .Values.defaultBackend.autoscaling.minReplicas }} + maxReplicas: {{ .Values.defaultBackend.autoscaling.maxReplicas }} + metrics: +{{- with .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ . }} +{{- end }} +{{- with .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ . }} +{{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml new file mode 100644 index 0000000..00891ce --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml @@ -0,0 +1,21 @@ +{{- if .Values.defaultBackend.enabled -}} +{{- if or (gt (.Values.defaultBackend.replicaCount | int) 1) (gt (.Values.defaultBackend.autoscaling.minReplicas | int) 1) }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: default-backend + minAvailable: {{ .Values.defaultBackend.minAvailable }} +{{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-psp.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-psp.yaml new file mode 100644 index 0000000..c144c8f --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-psp.yaml @@ -0,0 +1,38 @@ +{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }} +{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled (empty .Values.defaultBackend.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-backend + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-role.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-role.yaml new file mode 100644 index 0000000..a2b457c --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-role.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-backend + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + {{- with .Values.defaultBackend.existingPsp }} + resourceNames: [{{ . }}] + {{- else }} + resourceNames: [{{ include "ingress-nginx.fullname" . }}-backend] + {{- end }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-rolebinding.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-rolebinding.yaml new file mode 100644 index 0000000..dbaa516 --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-backend + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . 
}}-backend +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-service.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-service.yaml new file mode 100644 index 0000000..5f1d09a --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-service.yaml @@ -0,0 +1,41 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.defaultBackend.service.annotations }} + annotations: {{ toYaml .Values.defaultBackend.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.defaultBackend.service.type }} +{{- if .Values.defaultBackend.service.clusterIP }} + clusterIP: {{ .Values.defaultBackend.service.clusterIP }} +{{- end }} +{{- if .Values.defaultBackend.service.externalIPs }} + externalIPs: {{ toYaml .Values.defaultBackend.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.defaultBackend.service.loadBalancerIP }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: http + port: {{ .Values.defaultBackend.service.servicePort }} + protocol: TCP + targetPort: http + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: http + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: default-backend +{{- end }} diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-serviceaccount.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-serviceaccount.yaml new file mode 100644 index 0000000..b45a95a --- /dev/null +++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . 
}}
+  namespace: {{ .Release.Namespace }}
+automountServiceAccountToken: {{ .Values.defaultBackend.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/templates/dh-param-secret.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/dh-param-secret.yaml
new file mode 100644
index 0000000..12e7a4f
--- /dev/null
+++ b/ansible/roles/kubernetes_install/files/ingress-nginx/templates/dh-param-secret.yaml
@@ -0,0 +1,10 @@
+{{- with .Values.dhParam -}}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "ingress-nginx.controller.fullname" $ }}
+  labels:
+    {{- include "ingress-nginx.labels" $ | nindent 4 }}
+data:
+  dhparam.pem: {{ . }}
+{{- end }}
diff --git a/ansible/roles/kubernetes_install/files/ingress-nginx/values.yaml b/ansible/roles/kubernetes_install/files/ingress-nginx/values.yaml
new file mode 100644
index 0000000..9ec174f
--- /dev/null
+++ b/ansible/roles/kubernetes_install/files/ingress-nginx/values.yaml
@@ -0,0 +1,944 @@
+## nginx configuration
+## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md
+##
+
+## Overrides for generated resource names
+# See templates/_helpers.tpl
+# nameOverride:
+# fullnameOverride:
+
+## Labels to apply to all resources
+##
+commonLabels: {}
+# scmhash: abc123
+# myLabel: aakkmd
+
+controller:
+  name: controller
+  image:
+    ## Keep false as default for now!
+    chroot: false
+    registry: registry.k8s.io
+    image: ingress-nginx/controller
+    ## for backwards compatibility consider setting the full image url via the repository value below
+    ## use *either* the default registry/image pair above *or* the repository value; providing both will make installing the chart fail
+    ## repository:
+    tag: "v1.3.1"
+    digest: sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974
+    digestChroot: sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1
+    pullPolicy: IfNotPresent
+    # www-data -> uid 101
+    runAsUser: 101
+    allowPrivilegeEscalation: true
+
+  # -- Use an existing PSP instead of creating one
+  existingPsp: ""
+
+  # -- Configures the controller container name
+  containerName: controller
+
+  # -- Configures the ports that the nginx-controller listens on
+  containerPort:
+    http: 80
+    https: 443
+
+  # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
+  config: {}
+
+  # -- Annotations to be added to the controller configuration configmap.
+  configAnnotations: {}
+
+  # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers
+  proxySetHeaders: {}
+
+  # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers
+  addHeaders: {}
+
+  # -- Optionally customize the pod dnsConfig.
+  dnsConfig: {}
+
+  # -- Optionally customize the pod hostname.
+  hostname: {}
+
+  # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.
+  # By default, while using the host network, name resolution uses the host's DNS. If you wish nginx-controller
+  # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
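+  # A minimal sketch of that combination (illustrative values, not the chart
+  # defaults set below):
+  #   hostNetwork: true
+  #   dnsPolicy: ClusterFirstWithHostNet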
+ dnsPolicy: ClusterFirst + + # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network; the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + # -- Process Ingress objects without ingressClass annotation/ingressClassName field + # Overrides value for --watch-ingress-without-class flag of the controller binary + # Defaults to false + watchIngressWithoutClass: false + + # -- Process IngressClass per name (additionally as per spec.controller). + ingressClassByName: false + + # -- This configuration defines if Ingress Controller should allow users to set + # their own *-snippet annotations, otherwise this is forbidden / dropped + # when users add those annotations. + # Global snippets in ConfigMap are still respected + allowSnippetAnnotations: true + + # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + + ## Use host ports 80 and 443 + ## Disabled by default + hostPort: + # -- Enable 'hostPort' or not + enabled: false + ports: + # -- 'hostPort' http port + http: 80 + # -- 'hostPort' https port + https: 443 + + # -- Election ID to use for status update + electionID: ingress-controller-leader + + ## This section refers to the creation of the IngressClass resource + ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19 + ingressClassResource: + # -- Name of the ingressClass + name: nginx + # -- Is this ingressClass enabled or not + enabled: true + # -- Is this the default ingressClass for the cluster + default: false + # -- Controller-value of the controller that is processing this ingressClass + controllerValue: "k8s.io/ingress-nginx" + + # -- Parameters is a link to a custom resource containing additional + # configuration for the controller. This is optional if the controller + # does not require extra parameters. + parameters: {} + + # -- For backwards compatibility with ingress.class annotation, use ingressClass. + # Algorithm is as follows: first ingressClassName is considered; if not present, the controller looks for the ingress.class annotation + ingressClass: nginx + + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Security Context policies for controller pods + podSecurityContext: {} + + # -- See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls + sysctls: {} + # sysctls: + # "net.core.somaxconn": "8192" + + # -- Allows customization of the source of the IP address or FQDN to report + # in the ingress status field. By default, it reads the information provided + # by the service. If disabled, the status field reports the IP address of the + # node or nodes where an ingress controller pod is running.
+ publishService: + # -- Enable 'publishService' or not + enabled: true + # -- Allows overriding of the publish service to bind to + # Must be <namespace>/<service_name> + pathOverride: "" + + # Limit the scope of the controller to a specific namespace + scope: + # -- Enable 'scope' or not + enabled: false + # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE) + namespace: "" + # -- When scope.enabled == false, instead of watching all namespaces, watch only the namespaces whose labels + # match the namespaceSelector. Format like foo=bar. Defaults to empty, meaning all namespaces are watched. + namespaceSelector: "" + + # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + + tcp: + # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the tcp config configmap + annotations: {} + + udp: + # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the udp config configmap + annotations: {} + + # -- Maxmind license key to download GeoLite2 Databases. + ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + + # -- Additional command line arguments to pass to nginx-ingress-controller + # E.g. to specify the default SSL certificate you can use + extraArgs: {} + ## extraArgs: + ## default-ssl-certificate: "<namespace>/<secret_name>" + + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + # -- Use a `DaemonSet` or `Deployment` + kind: Deployment + + # -- Annotations to be added to the controller Deployment or DaemonSet + ## + annotations: {} + # keel.sh/pollSchedule: "@every 60m" + + # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels + ## + labels: {} + # keel.sh/policy: patch + # keel.sh/trigger: poll + + + # -- The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # -- `minReadySeconds` to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Affinity and anti-affinity rules for server scheduling to nodes + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # -
key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: "kubernetes.io/hostname" + + # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: ingress-nginx-internal + + # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready + ## wait up to five minutes for the drain of connections + ## + terminationGracePeriodSeconds: 300 + + # -- Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + ## startupProbe: + ## httpGet: + ## # should match container.healthCheckPath + ## path: "/healthz" + ## port: 10254 + ## scheme: HTTP + ## initialDelaySeconds: 5 + ## periodSeconds: 5 + ## timeoutSeconds: 2 + ## successThreshold: 1 + ## failureThreshold: 5 + livenessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + + # -- Path of the health check endpoint. All requests received on the port defined by + # the healthz-port parameter are forwarded internally to this path. + healthCheckPath: "/healthz" + + # -- Address to bind the health check endpoint. + # It is better to set this option to the internal node address + # if the ingress nginx controller is running in the `hostNetwork: true` mode. + healthCheckHost: "" + + # -- Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes + ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 + ## Ideally, there should be no limits. 
+ ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ + resources: + ## limits: + ## cpu: 100m + ## memory: 90Mi + requests: + cpu: 100m + memory: 90Mi + + # Mutually exclusive with keda autoscaling + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics + # - type: Pods + # pods: + # metric: + # name: nginx_ingress_controller_nginx_process_requests_total + # target: + # type: AverageValue + # averageValue: 10000m + + # Mutually exclusive with hpa autoscaling + keda: + apiVersion: "keda.sh/v1alpha1" + ## apiVersion changes with keda 1.x vs 2.x + ## 2.x = keda.sh/v1alpha1 + ## 1.x = keda.k8s.io/v1alpha1 + enabled: false + minReplicas: 1 + maxReplicas: 11 + pollingInterval: 30 + cooldownPeriod: 300 + restoreToOriginalReplicaCount: false + scaledObject: + annotations: {} + # Custom annotations for ScaledObject resource + # annotations: + # key: value + triggers: [] + # - type: prometheus + # metadata: + # serverAddress: http://<prometheus-host>:9090 + # metricName: http_requests_total + # threshold: '100' + # query: sum(rate(http_requests_total{deployment="my-deployment"}[2m])) + + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + # -- Enable mimalloc as a drop-in replacement for malloc. + ## ref: https://github.com/microsoft/mimalloc + ## + enableMimalloc: true + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: true + + # -- If enabled, adds an appProtocol option to the Kubernetes service. The appProtocol field replaces annotations that were + # used for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + # It allows choosing the protocol for each backend specified in the Kubernetes service. + # See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 + # Will be ignored for Kubernetes versions older than 1.20 + ## + appProtocol: true + + annotations: {} + labels: {} + # clusterIP: "" + + # -- List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # -- Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on providers supporting it. + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + ## Must be either "None" or "ClientIP" if set. Kubernetes will default to "None".
+ ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + # sessionAffinity: "" + + ## Specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, + ## the service controller allocates a port from your cluster’s NodePort range. + ## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Represents the dual-stack-ness requested or required by this Service. Possible values are + # SingleStack, PreferDualStack or RequireDualStack. + # The ipFamilies and clusterIPs fields depend on the value of this field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilyPolicy: "SingleStack" + + # -- List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically + # based on cluster configuration and the ipFamilyPolicy field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilies: + - IPv4 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + + ## type: NodePort + ## nodePorts: + ## http: 32080 + ## https: 32443 + ## tcp: + ## 8080: 32808 + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + + external: + enabled: true + + internal: + # -- Enables an additional internal load balancer (besides the external one). + enabled: false + # -- Annotations are mandatory for the load balancer to come up. Varies with the cloud service. + annotations: {} + + # loadBalancerIP: "" + + # -- Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. + loadBalancerSourceRanges: [] + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + # shareProcessNamespace enables process namespace sharing within the pod. + # This can be used for example to signal log rotation using `kill -USR1` from a sidecar. + shareProcessNamespace: false + + # -- Additional containers to be added to the controller pod. + # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Additional volumeMounts to the controller main container. + extraVolumeMounts: [] + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + # -- Additional volumes to the controller pod. + extraVolumes: [] + # - name: copy-portal-skins + # emptyDir: {} + + # -- Containers, which are run before the app containers are started. 
+ extraInitContainers: [] + # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + extraModules: [] + ## Modules, which are mounted into the core nginx image + # - name: opentelemetry + # image: registry.k8s.io/ingress-nginx/opentelemetry:v20220801-g00ee51f09@sha256:482562feba02ad178411efc284f8eb803a185e3ea5588b6111ccbc20b816b427 + # + # The image must contain a `/usr/local/bin/init_module.sh` executable, which + # will be executed as initContainers, to move its config files within the + # mounted volume. + + admissionWebhooks: + annotations: {} + # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem". + + ## Additional annotations to the admission webhooks. + ## These annotations will be added to the ValidatingWebhookConfiguration and + ## the Jobs Spec of the admission webhooks. + enabled: true + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + # -- Admission Webhook failure policy to use + failurePolicy: Fail + # timeoutSeconds: 10 + port: 8443 + certificate: "/usr/local/certificates/cert" + key: "/usr/local/certificates/key" + namespaceSelector: {} + objectSelector: {} + # -- Labels to be added to admission webhooks + labels: {} + + # -- Use an existing PSP instead of creating one + existingPsp: "" + networkPolicyEnabled: false + + service: + annotations: {} + # clusterIP: "" + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + createSecretJob: + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + patchWebhookJob: + resources: {} + + patch: + enabled: true + image: + registry: registry.k8s.io + image: ingress-nginx/kube-webhook-certgen + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: v1.3.0 + digest: sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47 + pullPolicy: IfNotPresent + # -- Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + # -- Labels to be added to patch job resources + labels: {} + securityContext: + runAsNonRoot: true + runAsUser: 2000 + fsGroup: 2000 + + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + # clusterIP: "" + + # -- List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 10254 + type: ClusterIP + # externalTrafficPolicy: "" + # nodePort: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + ## The label to use to retrieve the job name from. 
+ ## jobLabel: "app.kubernetes.io/name" + namespace: "" + namespaceSelector: {} + ## Default: scrape .Release.Namespace only + ## To scrape all, use the following: + ## namespaceSelector: + ## any: true + scrapeInterval: 30s + # honorLabels: true + targetLabels: [] + relabelings: [] + metricRelabelings: [] + + prometheusRule: + enabled: false + additionalLabels: {} + # namespace: "" + rules: [] + # # These are just example rules, please adapt them to your needs + # - alert: NGINXConfigFailed + # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: bad ingress config - nginx config test failed + # summary: uninstall the latest ingress changes to allow config reloads to resume + # - alert: NGINXCertificateExpiry + # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: ssl certificate(s) will expire in less than a week + # summary: renew expiring certificates to avoid downtime + # - alert: NGINXTooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 5XXs + # summary: More than 5% of all requests returned 5XX, this requires your attention + # - alert: NGINXTooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 4XXs + # summary: More than 5% of all requests returned 4XX, this requires your attention + + # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook: + # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds + # to 300, allowing the draining of connections up to five minutes. + # If the active connections end before that, the pod will terminate gracefully at that time. + # To effectively take advantage of this feature, the ConfigMap option + # worker-shutdown-timeout is set to 240s instead of 10s.
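+ # For example (a sketch using the controller.config section documented above; the "240s" value is illustrative), the matching timeout can be pinned explicitly: + # config: + # worker-shutdown-timeout: "240s"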
+ ## + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + + priorityClassName: "" + +# -- Rollback limit +## +revisionHistoryLimit: 10 + +## Default 404 backend +## +defaultBackend: + ## + enabled: false + + name: defaultbackend + image: + registry: registry.k8s.io + image: defaultbackend-amd64 + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "1.5" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + + # -- Use an existing PSP instead of creating one + existingPsp: "" + + extraArgs: {} + + serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + # -- Security Context policies for controller pods + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + # notes on enabling and using sysctls + ## + podSecurityContext: {} + + # -- Security Context policies for controller main container. + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + # notes on enabling and using sysctls + ## + containerSecurityContext: {} + + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + # -- Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + extraVolumeMounts: [] + ## Additional volumeMounts to the default backend container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the default backend pod. 
+ # - name: copy-portal-skins + # emptyDir: {} + + autoscaling: + annotations: {} + enabled: false + minReplicas: 1 + maxReplicas: 2 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + service: + annotations: {} + + # clusterIP: "" + + # -- List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + # -- Labels to be added to the default backend resources + labels: {} + +## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266 +rbac: + create: true + scope: false + +## If true, create & use Pod Security Policy resources +## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Annotations for the controller service account + annotations: {} + +# -- Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# -- TCP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# -- UDP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +udp: {} +# 53: "kube-system/kube-dns:53" + +# -- Prefix for TCP and UDP port names in ingress controller service +## Some cloud providers, like Yandex Cloud, may have requirements for a port name regex to support cloud load balancer integration +portNamePrefix: "" + +# -- (string) A base64-encoded Diffie-Hellman parameter.
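+# (Consumed by templates/dh-param-secret.yaml above, which stores the value as the dhparam.pem key of the controller Secret.)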
+# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` +## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param +dhParam: diff --git a/ansible/roles/kubernetes_install/files/kubeconfig b/ansible/roles/kubernetes_install/files/kubeconfig new file mode 100644 index 0000000..95b048c --- /dev/null +++ b/ansible/roles/kubernetes_install/files/kubeconfig @@ -0,0 +1,20 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1USXdOakV3TkRZME5Gb1hEVE15TVRJd016RXdORFkwTkZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT0xYCnhYSWpmVWdFdXdxMFZrYXQ0N2d6Q09vRERDL1YvRHl5T0dWZG1PSDEwd1dHZmwrMFI0dkkwaWlHc0ZKRVFqWWMKRGt6THZUOHlRNVpybFpiV3VuVkFYaTlrcHRxY0d2R2NZYXdrMGVIRVdqQ1RRTXo0T2dOd2JuQytKdkd1MnJiVgpaUVVvRzV1cHpDRkM0Q0RtM1h2Ym14RjdYUU11L0FOU045V1UwYS9Td0tvR25EaVMyV1JBOFJpRG9DYURKcGprCjc1azAyMkZOanlOZkxadktmMzFzNTg3OGF1bS9WS0UzeVV4SGhqVzFEeEpTWUMrK1RhV0F0MWpBVXZObWVNQmQKVStmRW0xbUVnNWtnWVBER2d3czBSVHB4WFk1U3dTL3hKVkd2VjRYeExlWEVoWmc2ZlF4c0hvcG1vdVZJOEJLMQpUdW1ram4zTkhrU0dtRVEzczBFQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZNc2RSdjJzQ2RaUW1ZM1dVZ1U4aUlhOHM5cWpNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRXZ5WWorT2QwVSttYkdkSzJyWApTcXdiZjY0M0V4YWlkVDVWQjhPUExoNm1kbVQvTk04cHdYUDg0Y0J2RC9rSzBXTllObkc2RFhTS3BtWjBSM1BmCm03ck8wZ2N4M1dYaDIzWHk5VmJWcGcyK0FXdFRRVDd5dVM3L3U3YjVnV1lQNzVqNk0rOWQvdXNtc3g3alFxNjEKa3NJTzlKNXVmNCtXSHJtQm1WMGNydGZTTjN5bTF2Q2VDZzNNeFpkY3hIUXgrUjNNWFZyYVZFK1NIUE5hdzgzQQppZFI3eDkyenMwY3dRcy9wSGxRK0t5aDFnQ01JQkVFTXM1OERwZU11Q0VUaEZ2elBuL1l6YlVoNUYrcm1KNytoCjJ5VVV0R2x5V3NJQ2l5ODl0Y2pOT1RBN2EvSGJ5ZDk4MmxIM1hLY1ZlUUdYVlJFTFF0UU9sUVNHOVRQU1R4OFcKNWdJPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://10.10.30.214:6443 + name: kubernetes +contexts: +- context: + cluster: kubernetes + user: kubernetes-admin + name: kubernetes-admin@kubernetes +current-context: kubernetes-admin@kubernetes +kind: Config +preferences: {} +users: +- name: kubernetes-admin + user: + client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJYzVmZFNYMzFJTWd3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TWpFeU1EWXhNRFEyTkRSYUZ3MHlNekV5TURZeE1EUTJORGRhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTFBRGVDUGF5elpXTWtpV1AKQzV0RmRzL3orSmpndDYzUzFxMC9TQ2VYTWx0MGRTVGFqb3B5UzRueFVPb1ZXYmY2M016NEQ0RzRDWDNheW1TWgpLa1JmbVJ5SEtNRTVWTll6WUNWSUY1Qm1acEpVUHJ6OU5vZVRocDlQOTV2M3hIWlNBektvKysyZnQ1dzJ1dGJYCnBkN3g0Nk5KSnBPb1I5M2ZxOFFZRkhHOEhhSEd6WjhJbS9CV21KVEJua2R2aGJxczB2R3ZmbkdXRGhFT05wd2IKd2w4Z0kwMFpqTHhBRkZxY1d1MVZtNmJZYXUzVDlrSGdEcEplNUVrSFVFZ2cwWWlOTlZlUzJHdGFaeUpiWUcxZQo4Um9Zd3k0cFlvbzVlMW1sZDJHWE1EUWhkQk1oWGk5R1NsSC9GaWlqN24xTVZYTFpCSk9UL0lSdElQR1dIdmtXCmxVcEQ4UUlEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JUTEhVYjlyQW5XVUptTjFsSUZQSWlHdkxQYQpvekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBTTFLbEwyL3BwU1pCTGpTd3NWZzhGYTlkWk05RDIzNDNJdzY3Ci9QN1JWKzRjSVJqSXlJZFZGMUgrbkl2bFl0cncrMkZDTHpQYWNYcTZUWi9IUktFVEZtb1FYRnhnVk5GRDZVZXAKTW1sd2FCK1BnUmFRWDZoZjJlRUNXaXBobXpLRitsQnRtNENpci9USlZwTitQQ1Jhc0VkVW9pVUVtUHVsRXYvUQp1bDg3Q3RUbVAzajkxRi94eWFrYmhSS2pPbUY0dFRlZ1E4ZUE2SkFYV0d6S09XMzdZR1BHc091ZXlzU3hzanp0CjdpRjczVGVKdmdtbUllWHkrMkdoTFdIQ0tHVGJUek1UZnpYV0J4ODBXbzVjdFNaWmYzT0NJWWRHM1ViY0lsdXMKR0JreXM4b0phSXFHSE93NUpCRE1PNzliajQrbU01MlJGNmtQbkJDL2duckUyQUdHM3c9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMUFEZUNQYXl6WldNa2lXUEM1dEZkcy96K0pqZ3Q2M1MxcTAvU0NlWE1sdDBkU1RhCmpvcHlTNG54VU9vVldiZjYzTXo0RDRHNENYM2F5bVNaS2tSZm1SeUhLTUU1Vk5ZellDVklGNUJtWnBKVVByejkKTm9lVGhwOVA5NXYzeEhaU0F6S28rKzJmdDV3MnV0YlhwZDd4NDZOSkpwT29SOTNmcThRWUZIRzhIYUhHelo4SQptL0JXbUpUQm5rZHZoYnFzMHZHdmZuR1dEaEVPTnB3YndsOGdJMDBaakx4QUZGcWNXdTFWbTZiWWF1M1Q5a0hnCkRwSmU1RWtIVUVnZzBZaU5OVmVTMkd0YVp5SmJZRzFlOFJvWXd5NHBZb281ZTFtbGQyR1hNRFFoZEJNaFhpOUcKU2xIL0ZpaWo3bjFNVlhMWkJKT1QvSVJ0SVBHV0h2a1dsVXBEOFFJREFRQUJBb0lCQUZsT0VEb29hY091WnF1OQp4SmM0RGpmeGU2MVNBUDkrNnB6aUdCRTJGRHZ6U0loOFFORGd3eXJNN2VtTzRmV01TZEd2U2lPR0dsZHRPN2djClRtVCtybUthSU5sckk5SjM5T1pnYmhEM0ZCdkxNay9IWHNjVXIzRjdOTDF5WnhuTVdkbmRBbEExbGgxTFljYXMKNytTQW1OYXlsd0w0R21CRHQ0L3NwOVFjNFFoOXRDQXdTMGUvb1k1cnl4QzFBb25zNUFIMmJGNGg3SGRMM0tvWApHMjRjTm9MY2d0K0J4M00wYSs2aGFoSmV6aGhVL2R5L1dRdjlJay9VRlJra3dBdkxvM3VPUVA1bzB0RVpXcXF1CkhUM3VRLzM4bTBwOGE1U21WR250RTNGWERQRlJGUi9aWXBjME1FMHRPYzZ0U3BmUHQzajRqUDBubjRGMStXdEQKTWhBSlNIMENnWUVBNHNzTTJ3Y1lKaUZhdTFCRzd4cXJkVGMrTWl4L3pWWWZtZHRhemtUcEsvRFF3ZmdRWVREbgpWbWRwNW5MRTdyeTRaMU90RGg3d0pKTVMySHRvRW9sTk9YMjVMKzNBaFgzRFUrU1lNeDJ2VU1jcUVzak1hUHFDCjFvM0dxa2JiNmozS2RZck9jWDhFUXVBWlhmc0RTSWxUOHNxcFdRcGFEZE9RcE15Y05WYzNjTDhDZ1lFQTcwNDUKd1d1dXN2UitGWUgrdW12bU9Ndm9VdmNROXJFcnZGcEJkdndiQnJsWHgvWFBNNE9kYXZXV1hQb3hxbWpUeExzcApnV000V0lVUUN6RjZGb2V5Kzc0T2ZmTmJtZG8renNVSkx5bnBPb3AyWUNUWk8vQ1NCNTFXNHN6UTlQaTErak9xCkdqdjVCK1RqV3o0cEdKWWp2eGpXZ2Nha1FDZEgzRHVTeFlYMngwOENnWUJaT0RRb2ZsUDd2Q2RyaFJ0Q3VTVTIKaWJNSUhnVnhEQzZHWW9zSWxvZDhaOUpZWEhSbEo4MzZhZGg1ZGpFUEVtTWhFd1FEaUJ4RTV5OEV4eGVjSXpPawpLRmVRQ1dJeG9kWVR6TndyVDhSR2JQT2FUREJPSkM4UXBObkE1dnRnM1VvbWo2TERkNHAvbkpXZUtUK1RhNk1BCjRzVllhQUFoYkZkODNabWVTbDlmRlFLQmdRQ3RURlQvQVdCT01FaHVndWxaVDNJMWgxVURYL0JrOWdEYU1mSmUKbkV0bUh5cTJvQWdoSWhzSnJqZnB0VFhxVm1lbGZIU2VRcUEzV29VMzFlaTRFQ1ZKc1dVRlNRcjQ2OWU0SFhCOQpPeml2TUQ1eGViM25ibHdTTDVzUU80ckhIS1dNUDRYYjRicUNRUHQweEJzMnR1UEVLOVNMdnJLTDB1WnpVcUVECmNmUTRlUUtCZ1FDV0xtUEJwUDVMVFA0YmxRVDFMMy9LQ0cweGxzL2p
FL1JBVlE5VmY1UVVPeFlUaHBlZTUzdXAKUmwxOEVuSks1THpuNlF2TzRMeXRGdVNLeG5aRnYxY1pycUNKR0owcXhNc3VpTGVtbnYxb2NGMDdiWkYwV0pzWQowak9Ha0oweXZtUlFBVzhQUEhJeGdzN3Y3ekNzZHdTcmx3REEvYnNxa1BUVEJxNlpSUHhSUXc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= + diff --git a/ansible/roles/kubernetes_install/handlers/main.yml b/ansible/roles/kubernetes_install/handlers/main.yml new file mode 100644 index 0000000..4bf601f --- /dev/null +++ b/ansible/roles/kubernetes_install/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: Reload systemd configuration + systemd: + daemon_reload: True + +- name: Restart containerd service + service: + name: containerd + enabled: true + state: restarted diff --git a/ansible/roles/kubernetes_install/meta/main.yml b/ansible/roles/kubernetes_install/meta/main.yml new file mode 100644 index 0000000..c572acc --- /dev/null +++ b/ansible/roles/kubernetes_install/meta/main.yml @@ -0,0 +1,55 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this is a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list.
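+ # e.g. (hypothetical role name, shown for illustration only): + # dependencies: + # - role: containerd_install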
diff --git a/ansible/roles/kubernetes_install/tasks/helm-chart-nginx.yml b/ansible/roles/kubernetes_install/tasks/helm-chart-nginx.yml new file mode 100644 index 0000000..3fd6896 --- /dev/null +++ b/ansible/roles/kubernetes_install/tasks/helm-chart-nginx.yml @@ -0,0 +1,12 @@ +--- +- name: Create Nginx Ingress Controller deployment + kubernetes.core.helm: + kubeconfig: "{{ role_path }}/files/kubeconfig" + release_name: "{{item}}" + release_namespace: "{{item}}" + chart_ref: "{{ role_path }}/files/{{item}}" + create_namespace: yes + release_state: present + with_items: + - ingress-nginx + diff --git a/ansible/roles/kubernetes_install/tasks/helm-install.yml b/ansible/roles/kubernetes_install/tasks/helm-install.yml new file mode 100644 index 0000000..d057455 --- /dev/null +++ b/ansible/roles/kubernetes_install/tasks/helm-install.yml @@ -0,0 +1,60 @@ +--- +- name: Create Helm temporary directory + file: + path: /tmp/helm + state: directory + mode: "0755" + +- name: Fetch Helm package + get_url: + url: 'https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz' + dest: /tmp/helm.tar.gz + checksum: '{{ helm_checksum }}' + +- name: Extract Helm package + unarchive: + remote_src: true + src: /tmp/helm.tar.gz + dest: /tmp/helm + +- name: Ensure "docker" group exists + group: + name: docker + state: present + become: true + +- name: Install helm to /usr/local/bin + copy: + remote_src: true + src: /tmp/helm/linux-amd64/helm + dest: /usr/local/bin/helm + owner: root + group: docker + mode: "0755" + become: true + +- name: Cleanup Helm temporary directory + file: + path: /tmp/helm + state: absent + +- name: Cleanup Helm temporary download + file: + path: /tmp/helm.tar.gz + state: absent + +- name: Ensure bash_completion.d directory exists + file: + path: /etc/bash_completion.d + state: directory + mode: "0755" + become: true + +- name: Setup Helm tab-completion + shell: | + set -o pipefail + /usr/local/bin/helm completion bash | tee /etc/bash_completion.d/helm + args: + executable: /bin/bash + changed_when: false + become: true diff --git a/ansible/roles/kubernetes_install/tasks/k8s-helm-chart.yml b/ansible/roles/kubernetes_install/tasks/k8s-helm-chart.yml new file mode 100644 index 0000000..6a4e308 --- /dev/null +++ b/ansible/roles/kubernetes_install/tasks/k8s-helm-chart.yml @@ -0,0 +1,9 @@ +--- +# Install Helm and deploy the bundled charts (master only).
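+# Rough CLI equivalent of the two includes below (a sketch; paths relative to this role): +# helm upgrade --install ingress-nginx files/ingress-nginx --namespace ingress-nginx --create-namespace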
+- include_tasks: helm-install.yml + when: kubernetes_role == 'master' + +- include_tasks: helm-chart-nginx.yml + when: kubernetes_role == 'master' diff --git a/ansible/roles/kubernetes_install/tasks/k8s-main.yml b/ansible/roles/kubernetes_install/tasks/k8s-main.yml new file mode 100644 index 0000000..346291e --- /dev/null +++ b/ansible/roles/kubernetes_install/tasks/k8s-main.yml @@ -0,0 +1,68 @@ +--- +- name: Install Kubernetes + block: + - name: 'Add Kubernetes repo key' + apt_key: + url: https://packages.cloud.google.com/apt/doc/apt-key.gpg + state: present + become: true + - name: Add Kubernetes repository + apt_repository: + repo: deb https://apt.kubernetes.io kubernetes-xenial main + state: present + filename: 'kubernetes' + become: true + - name: Install Kubernetes components + apt: + name: ['kubelet={{kubernetes_version}}-*', 'kubeadm={{kubernetes_version}}-*', 'kubectl={{kubernetes_version}}-*'] + state: present + update_cache: yes + #force: yes + #dpkg_options: force-downgrade + +- name: Hold Kubernetes packages + dpkg_selections: + name: "{{item}}" + selection: hold + with_items: + - kubelet + - kubectl + - kubeadm + +- name: Enable kubelet service + systemd: + name: kubelet + enabled: true + masked: false + +- name: Check if Kubernetes has already been initialized. + stat: + path: /etc/kubernetes/admin.conf + register: kubernetes_init_stat + + + +# Set up master. +- include_tasks: k8s-master.yml + when: kubernetes_role == 'master' + +# Set up nodes. +- name: Get the kubeadm join command from the Kubernetes master. + command: kubeadm token create --print-join-command + changed_when: false + when: kubernetes_role == 'master' + register: kubernetes_join_command_result + +- name: Set the kubeadm join command globally. + set_fact: + kubernetes_join_command: > + {{ kubernetes_join_command_result.stdout }} + {{ kubernetes_join_command_extra_opts }} + when: kubernetes_join_command_result.stdout is defined + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups['all'] }}" + +- include_tasks: k8s-node.yml + when: kubernetes_role == 'node' + diff --git a/ansible/roles/kubernetes_install/tasks/k8s-master.yml b/ansible/roles/kubernetes_install/tasks/k8s-master.yml new file mode 100644 index 0000000..037a542 --- /dev/null +++ b/ansible/roles/kubernetes_install/tasks/k8s-master.yml @@ -0,0 +1,35 @@ +--- +- name: Initialize Kubernetes master with kubeadm init. + command: > + kubeadm init + --pod-network-cidr={{ kubernetes_pod_network.cidr }} + --apiserver-advertise-address={{ kubernetes_apiserver_advertise_address | default(ansible_default_ipv4.address, true) }} + {{ kubernetes_kubeadm_init_extra_opts }} + register: kubeadmin_init + when: not kubernetes_init_stat.stat.exists + +- name: Print the init output to screen. + debug: + var: kubeadmin_init.stdout + verbosity: 2 + when: not kubernetes_init_stat.stat.exists + +- name: Ensure .kube directory exists. + file: + path: ~/.kube + state: directory + +- name: Symlink the kubectl admin.conf to ~/.kube/config. + file: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/config + state: link + +- name: Configure Calico networking.
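+ # Note: changed_when keys off kubectl reporting 'created', so re-applying an unchanged manifest is reported as ok (idempotent re-runs).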
+ command: "{{ item }}" + with_items: + - kubectl apply -f {{ kubernetes_calico_manifest_file }} + register: calico_result + changed_when: "'created' in calico_result.stdout" + when: kubernetes_pod_network.cni == 'calico' diff --git a/ansible/roles/kubernetes_install/tasks/k8s-node.yml b/ansible/roles/kubernetes_install/tasks/k8s-node.yml new file mode 100644 index 0000000..304cbf1 --- /dev/null +++ b/ansible/roles/kubernetes_install/tasks/k8s-node.yml @@ -0,0 +1,6 @@ +--- +- name: Join node to Kubernetes master + shell: > + {{ kubernetes_join_command }} + creates=/etc/kubernetes/kubelet.conf + tags: ['skip_ansible_lint'] diff --git a/ansible/roles/kubernetes_install/tasks/main.yml b/ansible/roles/kubernetes_install/tasks/main.yml new file mode 100644 index 0000000..d8978ed --- /dev/null +++ b/ansible/roles/kubernetes_install/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- include: os-main.yml + tags: os-main + +- include: os-runtime.yml + tags: os-runtime + +- include: k8s-main.yml + tags: k8s-main + diff --git a/ansible/roles/kubernetes_install/tasks/os-main.yml b/ansible/roles/kubernetes_install/tasks/os-main.yml new file mode 100644 index 0000000..20f4552 --- /dev/null +++ b/ansible/roles/kubernetes_install/tasks/os-main.yml @@ -0,0 +1,70 @@ +--- +- name: Update and upgrade apt packages + apt: + upgrade: yes + update_cache: yes + force_apt_get: yes + cache_valid_time: 86400 + +- name: Install apt packages + apt: + name: ['cloud-utils', 'apt-transport-https', 'ca-certificates', 'curl', 'socat', 'conntrack', 'gnupg', 'lsb-release', 'bash-completion', 'chrony'] + state: present + +- name: Disable ufw + command: 'ufw disable' + +- name: Disable SWAP since kubernetes can't work with swap enabled (1/2) + command: 'swapoff -a' + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2) + replace: + path: /etc/fstab + regexp: '^([^#].*?\sswap\s+sw\s+.*)$' + replace: '# \1' + +- name: Persist required kernel modules in modules-load.d + lineinfile: + path: /etc/modules-load.d/k8s2.conf + line: "{{ item }}" + create: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Load required kernel modules + modprobe: + name: "{{ item }}" + state: present + become: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Persist required sysctl settings in sysctl.d + lineinfile: + path: /etc/sysctl.d/k8s2.conf + line: "{{ item }}" + create: true + with_items: + - 'net.bridge.bridge-nf-call-iptables = 1' + - 'net.bridge.bridge-nf-call-ip6tables = 1' + - 'net.ipv4.ip_forward = 1' + +- name: Enable net.bridge.bridge-nf-call-iptables + sysctl: + name: "{{ item }}" + value: 1 + with_items: + - 'net.bridge.bridge-nf-call-iptables' + - 'net.bridge.bridge-nf-call-ip6tables' + +- name: Enable net.ipv4.ip_forward + sysctl: + name: net.ipv4.ip_forward + value: "1" + +- name: Set up the hosts file + template: + src: hosts.j2 + dest: /etc/hosts diff --git a/ansible/roles/kubernetes_install/tasks/os-runtime.yml b/ansible/roles/kubernetes_install/tasks/os-runtime.yml new file mode 100644 index 0000000..60be402 --- /dev/null +++ b/ansible/roles/kubernetes_install/tasks/os-runtime.yml @@ -0,0 +1,45 @@ +--- +- name: Add docker apt key + apt_key: + url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg + + +- name: Add docker apt repository + apt_repository: + repo: deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable + filename: docker + register: containerd_apt_repo_task + +- name: Update apt cache + apt: +
update_cache: yes + when: containerd_apt_repo_task.changed + +- name: Create containerd configuration directory + file: + path: /etc/containerd + state: directory + +- name: Configure containerd + template: + src: config.toml.j2 + dest: /etc/containerd/config.toml + notify: + - Restart containerd service + +- name: Install containerd packages + apt: + name: + - containerd.io + notify: + - Reload systemd configuration + - Restart containerd service + +- meta: flush_handlers + +- name: Enable containerd service + service: + name: containerd + enabled: True + state: started + diff --git a/ansible/roles/kubernetes_install/templates/config.toml.j2 b/ansible/roles/kubernetes_install/templates/config.toml.j2 new file mode 100644 index 0000000..0217565 --- /dev/null +++ b/ansible/roles/kubernetes_install/templates/config.toml.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} + +{% from 'yaml2toml_macro.j2' import yaml2toml with context -%} + +{{ yaml2toml(containerd_config) }} diff --git a/ansible/roles/kubernetes_install/templates/hosts.j2 b/ansible/roles/kubernetes_install/templates/hosts.j2 new file mode 100644 index 0000000..18804b7 --- /dev/null +++ b/ansible/roles/kubernetes_install/templates/hosts.j2 @@ -0,0 +1,6 @@ +127.0.0.1 localhost +::1 localhost + +{% for host in groups.all %} +{{ hostvars[host].ansible_default_ipv4.address }} {{ hostvars[host].ansible_fqdn }} {{ hostvars[host].ansible_hostname }} +{% endfor %} diff --git a/ansible/roles/kubernetes_install/templates/yaml2toml_macro.j2 b/ansible/roles/kubernetes_install/templates/yaml2toml_macro.j2 new file mode 100644 index 0000000..33f69d0 --- /dev/null +++ b/ansible/roles/kubernetes_install/templates/yaml2toml_macro.j2 @@ -0,0 +1,58 @@ +{%- macro yaml2inline_toml(item, depth) -%} + {%- if item is string or item is number -%} + {#- First, process all primitive types. -#} + {{ item | to_json }} + {%- elif item is mapping -%} + {#- Second, process all mappings. -#} + {#- Note that inline mappings must not contain newlines (except inside contained lists). -#} + {{ "{" }} + {%- for key, value in item.items() | sort -%} + {{ " " + + (key | to_json) + + " = " + + yaml2inline_toml(value, depth) + }} + {%- if not loop.last -%}{{ "," }}{%- endif -%} + {%- endfor -%} + {{ " }" }} + {%- else -%} + {#- Third, process all lists. -#} + {%- if item | length == 0 -%}{{ "[]" }}{%- else -%} + {{ "[" }} + {%- for entry in item -%} + {{ "\n" + + (" " * (depth + 1)) + + yaml2inline_toml(entry, depth + 1) + }} + {%- if not loop.last -%}{{ "," }}{%- endif -%} + {%- endfor -%} + {{ "\n" + (" " * depth) + "]" }} + {%- endif -%} + {%- endif -%} +{%- endmacro -%} + +{%- macro yaml2toml(item, super_keys=[]) -%} + {%- for key, value in item.items() | sort -%} + {%- if value is not mapping -%} + {#- First, process all non-mappings. -#} + {{ (" " * (super_keys | length)) + + (key | to_json) + + " = " + + (yaml2inline_toml(value, super_keys | length)) + + "\n" + }} + {%- endif -%} + {%- endfor -%} + {%- for key, value in item.items() | sort -%} + {%- if value is mapping -%} + {#- Second, process all mappings.
-#} + {{ "\n" + + (" " * (super_keys | length)) + + "[" + + ((super_keys+[key]) | map('to_json') | join(".")) + + "]\n" + + yaml2toml(value, super_keys+[key]) + }} + {%- endif -%} + {%- endfor -%} +{%- endmacro -%} diff --git a/ansible/roles/kubernetes_install/tests/inventory b/ansible/roles/kubernetes_install/tests/inventory new file mode 100644 index 0000000..44d2fb2 --- /dev/null +++ b/ansible/roles/kubernetes_install/tests/inventory @@ -0,0 +1,17 @@ +[master] +10.10.30.214 + +[worker] +10.10.30.215 +10.10.30.216 + +[cluster:children] +master +worker + +[master:vars] +kubernetes_role="master" + +[worker:vars] +kubernetes_role="node" + diff --git a/ansible/roles/kubernetes_install/tests/test.yml b/ansible/roles/kubernetes_install/tests/test.yml new file mode 100644 index 0000000..d2103bc --- /dev/null +++ b/ansible/roles/kubernetes_install/tests/test.yml @@ -0,0 +1,6 @@ +--- +- hosts: cluster + become: true + gather_facts: true + roles: + - role: kubernetes_install diff --git a/ansible/roles/kubernetes_install/vars/main.yml b/ansible/roles/kubernetes_install/vars/main.yml new file mode 100644 index 0000000..2aa5032 --- /dev/null +++ b/ansible/roles/kubernetes_install/vars/main.yml @@ -0,0 +1,10 @@ +--- +# vars file for kubernetes_install
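+# The tasks in this role reference variables like the following (a sketch; the values below are illustrative only, define the real ones in defaults/main.yml or the inventory): +# kubernetes_version: "1.25.4" +# helm_version: "v3.10.2" +# helm_checksum: "sha256:..." +# kubernetes_role: master +# kubernetes_pod_network: +# cni: calico +# cidr: "192.168.0.0/16"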